diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index fe0bcf7338..03d11b5619 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -132,7 +132,7 @@ class ModelProviderIconApi(Resource): def get(self, provider: str, icon_type: str, lang: str): model_provider_service = ModelProviderService() icon, mimetype = model_provider_service.get_model_provider_icon( - provider=provider, icon_type=icon_type, lang=lang + tenant_id=current_user.current_tenant_id, provider=provider, icon_type=icon_type, lang=lang ) return send_file(io.BytesIO(icon), mimetype=mimetype) diff --git a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py index a91b9f0f02..c72c838d06 100644 --- a/api/core/app/app_config/easy_ui_based_app/model_config/converter.py +++ b/api/core/app/app_config/easy_ui_based_app/model_config/converter.py @@ -4,7 +4,8 @@ from core.app.app_config.entities import EasyUIBasedAppConfig from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.entities.model_entities import ModelStatus from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError -from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.entities.llm_entities import LLMMode +from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.provider_manager import ProviderManager @@ -67,14 +68,14 @@ class ModelConfigConverter: stop = completion_params["stop"] del completion_params["stop"] + model_schema = model_type_instance.get_model_schema(model_config.model, model_credentials) + # get model mode model_mode = model_config.mode if not model_mode: - mode_enum = model_type_instance.get_model_mode(model=model_config.model, credentials=model_credentials) - - model_mode = mode_enum.value - - model_schema = model_type_instance.get_model_schema(model_config.model, model_credentials) + model_mode = LLMMode.CHAT.value + if model_schema and model_schema.model_properties.get(ModelPropertyKey.MODE): + model_mode = LLMMode.value_of(model_schema.model_properties[ModelPropertyKey.MODE]).value if not skip_check and not model_schema: raise ValueError(f"Model {model_name} not exist.") diff --git a/api/core/app/app_config/easy_ui_based_app/model_config/manager.py b/api/core/app/app_config/easy_ui_based_app/model_config/manager.py index b5e4554181..acc1a2d35b 100644 --- a/api/core/app/app_config/easy_ui_based_app/model_config/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/model_config/manager.py @@ -1,6 +1,6 @@ from core.app.app_config.entities import ModelConfigEntity from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType -from core.model_runtime.model_providers import model_provider_factory +from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory from core.provider_manager import ProviderManager @@ -50,6 +50,7 @@ class ModelConfigManager: raise ValueError("model must be of object type") # model.provider + model_provider_factory = ModelProviderFactory(tenant_id) provider_entities = model_provider_factory.get_providers() model_provider_names = [provider.provider for provider in provider_entities] if "provider" not in 
config["model"] or config["model"]["provider"] not in model_provider_names: diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 807f09598c..764221dec5 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -2,7 +2,7 @@ import datetime import json import logging from collections import defaultdict -from collections.abc import Iterator +from collections.abc import Iterator, Sequence from json import JSONDecodeError from typing import Optional @@ -18,16 +18,15 @@ from core.entities.provider_entities import ( ) from core.helper import encrypter from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType -from core.model_runtime.entities.model_entities import FetchFrom, ModelType +from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from core.model_runtime.entities.provider_entities import ( ConfigurateMethod, CredentialFormSchema, FormType, ProviderEntity, ) -from core.model_runtime.model_providers import model_provider_factory from core.model_runtime.model_providers.__base.ai_model import AIModel -from core.model_runtime.model_providers.__base.model_provider import ModelProvider +from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory from extensions.ext_database import db from models.provider import ( LoadBalancingModelConfig, @@ -100,7 +99,9 @@ class ProviderConfiguration(BaseModel): restrict_models = quota_configuration.restrict_models - copy_credentials = self.system_configuration.credentials.copy() + copy_credentials = ( + self.system_configuration.credentials.copy() if self.system_configuration.credentials else {} + ) if restrict_models: for restrict_model in restrict_models: if ( @@ -137,6 +138,9 @@ class ProviderConfiguration(BaseModel): (q for q in self.system_configuration.quota_configurations if q.quota_type == current_quota_type), None ) + if not current_quota_configuration: + return SystemConfigurationStatus.UNSUPPORTED + return ( SystemConfigurationStatus.ACTIVE if current_quota_configuration.is_valid @@ -172,7 +176,7 @@ class ProviderConfiguration(BaseModel): else [], ) - def custom_credentials_validate(self, credentials: dict) -> tuple[Provider, dict]: + def custom_credentials_validate(self, credentials: dict) -> tuple[Provider | None, dict]: """ Validate custom credentials. 
:param credentials: provider credentials @@ -216,6 +220,7 @@ class ProviderConfiguration(BaseModel): if value == HIDDEN_VALUE and key in original_credentials: credentials[key] = encrypter.decrypt_token(self.tenant_id, original_credentials[key]) + model_provider_factory = ModelProviderFactory(self.tenant_id) credentials = model_provider_factory.provider_credentials_validate( provider=self.provider.provider, credentials=credentials ) @@ -243,13 +248,13 @@ class ProviderConfiguration(BaseModel): provider_record.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - provider_record = Provider( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - provider_type=ProviderType.CUSTOM.value, - encrypted_config=json.dumps(credentials), - is_valid=True, - ) + provider_record = Provider() + provider_record.tenant_id = self.tenant_id + provider_record.provider_name = self.provider.provider + provider_record.provider_type = ProviderType.CUSTOM.value + provider_record.encrypted_config = json.dumps(credentials) + provider_record.is_valid = True + db.session.add(provider_record) db.session.commit() @@ -324,7 +329,7 @@ class ProviderConfiguration(BaseModel): def custom_model_credentials_validate( self, model_type: ModelType, model: str, credentials: dict - ) -> tuple[ProviderModel, dict]: + ) -> tuple[ProviderModel | None, dict]: """ Validate custom model credentials. @@ -367,6 +372,7 @@ class ProviderConfiguration(BaseModel): if value == HIDDEN_VALUE and key in original_credentials: credentials[key] = encrypter.decrypt_token(self.tenant_id, original_credentials[key]) + model_provider_factory = ModelProviderFactory(self.tenant_id) credentials = model_provider_factory.model_credentials_validate( provider=self.provider.provider, model_type=model_type, model=model, credentials=credentials ) @@ -397,14 +403,13 @@ class ProviderConfiguration(BaseModel): provider_model_record.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - provider_model_record = ProviderModel( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - model_name=model, - model_type=model_type.to_origin_model_type(), - encrypted_config=json.dumps(credentials), - is_valid=True, - ) + provider_model_record = ProviderModel() + provider_model_record.tenant_id = self.tenant_id + provider_model_record.provider_name = self.provider.provider + provider_model_record.model_name = model + provider_model_record.model_type = model_type.to_origin_model_type() + provider_model_record.encrypted_config = json.dumps(credentials) + provider_model_record.is_valid = True db.session.add(provider_model_record) db.session.commit() @@ -471,13 +476,12 @@ class ProviderConfiguration(BaseModel): model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - model_setting = ProviderModelSetting( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - model_type=model_type.to_origin_model_type(), - model_name=model, - enabled=True, - ) + model_setting = ProviderModelSetting() + model_setting.tenant_id = self.tenant_id + model_setting.provider_name = self.provider.provider + model_setting.model_type = model_type.to_origin_model_type() + model_setting.model_name = model + model_setting.enabled = True db.session.add(model_setting) db.session.commit() @@ -506,13 +510,12 @@ class ProviderConfiguration(BaseModel): model_setting.updated_at = 
datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - model_setting = ProviderModelSetting( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - model_type=model_type.to_origin_model_type(), - model_name=model, - enabled=False, - ) + model_setting = ProviderModelSetting() + model_setting.tenant_id = self.tenant_id + model_setting.provider_name = self.provider.provider + model_setting.model_type = model_type.to_origin_model_type() + model_setting.model_name = model + model_setting.enabled = False db.session.add(model_setting) db.session.commit() @@ -573,13 +576,12 @@ class ProviderConfiguration(BaseModel): model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - model_setting = ProviderModelSetting( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - model_type=model_type.to_origin_model_type(), - model_name=model, - load_balancing_enabled=True, - ) + model_setting = ProviderModelSetting() + model_setting.tenant_id = self.tenant_id + model_setting.provider_name = self.provider.provider + model_setting.model_type = model_type.to_origin_model_type() + model_setting.model_name = model + model_setting.load_balancing_enabled = True db.session.add(model_setting) db.session.commit() @@ -608,25 +610,17 @@ class ProviderConfiguration(BaseModel): model_setting.updated_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) db.session.commit() else: - model_setting = ProviderModelSetting( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - model_type=model_type.to_origin_model_type(), - model_name=model, - load_balancing_enabled=False, - ) + model_setting = ProviderModelSetting() + model_setting.tenant_id = self.tenant_id + model_setting.provider_name = self.provider.provider + model_setting.model_type = model_type.to_origin_model_type() + model_setting.model_name = model + model_setting.load_balancing_enabled = False db.session.add(model_setting) db.session.commit() return model_setting - def get_provider_instance(self) -> ModelProvider: - """ - Get provider instance. - :return: - """ - return model_provider_factory.get_provider_instance(self.provider.provider) - def get_model_type_instance(self, model_type: ModelType) -> AIModel: """ Get current model type instance. 
@@ -634,11 +628,19 @@ class ProviderConfiguration(BaseModel): :param model_type: model type :return: """ - # Get provider instance - provider_instance = self.get_provider_instance() + model_provider_factory = ModelProviderFactory(self.tenant_id) # Get model instance of LLM - return provider_instance.get_model_instance(model_type) + return model_provider_factory.get_model_type_instance(provider=self.provider.provider, model_type=model_type) + + def get_model_schema(self, model_type: ModelType, model: str, credentials: dict) -> AIModelEntity | None: + """ + Get model schema + """ + model_provider_factory = ModelProviderFactory(self.tenant_id) + return model_provider_factory.get_model_schema( + provider=self.provider.provider, model_type=model_type, model=model, credentials=credentials + ) def switch_preferred_provider_type(self, provider_type: ProviderType) -> None: """ @@ -665,11 +667,10 @@ class ProviderConfiguration(BaseModel): if preferred_model_provider: preferred_model_provider.preferred_provider_type = provider_type.value else: - preferred_model_provider = TenantPreferredModelProvider( - tenant_id=self.tenant_id, - provider_name=self.provider.provider, - preferred_provider_type=provider_type.value, - ) + preferred_model_provider = TenantPreferredModelProvider() + preferred_model_provider.tenant_id = self.tenant_id + preferred_model_provider.provider_name = self.provider.provider + preferred_model_provider.preferred_provider_type = provider_type.value db.session.add(preferred_model_provider) db.session.commit() @@ -734,13 +735,14 @@ class ProviderConfiguration(BaseModel): :param only_active: only active models :return: """ - provider_instance = self.get_provider_instance() + model_provider_factory = ModelProviderFactory(self.tenant_id) + provider_schema = model_provider_factory.get_provider_schema(self.provider.provider) model_types = [] if model_type: model_types.append(model_type) else: - model_types = provider_instance.get_provider_schema().supported_model_types + model_types = provider_schema.supported_model_types # Group model settings by model type and model model_setting_map = defaultdict(dict) @@ -749,11 +751,11 @@ class ProviderConfiguration(BaseModel): if self.using_provider_type == ProviderType.SYSTEM: provider_models = self._get_system_provider_models( - model_types=model_types, provider_instance=provider_instance, model_setting_map=model_setting_map + model_types=model_types, provider_schema=provider_schema, model_setting_map=model_setting_map ) else: provider_models = self._get_custom_provider_models( - model_types=model_types, provider_instance=provider_instance, model_setting_map=model_setting_map + model_types=model_types, provider_schema=provider_schema, model_setting_map=model_setting_map ) if only_active: @@ -764,23 +766,26 @@ class ProviderConfiguration(BaseModel): def _get_system_provider_models( self, - model_types: list[ModelType], - provider_instance: ModelProvider, + model_types: Sequence[ModelType], + provider_schema: ProviderEntity, model_setting_map: dict[ModelType, dict[str, ModelSettings]], ) -> list[ModelWithProviderEntity]: """ Get system provider models. 
:param model_types: model types - :param provider_instance: provider instance + :param provider_schema: provider schema :param model_setting_map: model setting map :return: """ provider_models = [] for model_type in model_types: - for m in provider_instance.models(model_type): + for m in provider_schema.models: + if m.model_type != model_type: + continue + status = ModelStatus.ACTIVE if m.model_type in model_setting_map and m.model in model_setting_map[m.model_type]: model_setting = model_setting_map[m.model_type][m.model] if model_setting.enabled is False: status = ModelStatus.DISABLED @@ -801,7 +806,7 @@ if self.provider.provider not in original_provider_configurate_methods: original_provider_configurate_methods[self.provider.provider] = [] - for configurate_method in provider_instance.get_provider_schema().configurate_methods: + for configurate_method in provider_schema.configurate_methods: original_provider_configurate_methods[self.provider.provider].append(configurate_method) should_use_custom_model = False @@ -822,14 +827,20 @@ ]: # only customizable model for restrict_model in restrict_models: - copy_credentials = self.system_configuration.credentials.copy() + copy_credentials = ( + self.system_configuration.credentials.copy() + if self.system_configuration.credentials + else {} + ) if restrict_model.base_model_name: copy_credentials["base_model_name"] = restrict_model.base_model_name try: - custom_model_schema = provider_instance.get_model_instance( - restrict_model.model_type - ).get_customizable_model_schema_from_credentials(restrict_model.model, copy_credentials) + custom_model_schema = self.get_model_schema( + model_type=restrict_model.model_type, + model=restrict_model.model, + credentials=copy_credentials, + ) except Exception as ex: logger.warning(f"get custom model schema failed, {ex}") continue @@ -875,15 +886,15 @@ def _get_custom_provider_models( self, - model_types: list[ModelType], - provider_instance: ModelProvider, + model_types: Sequence[ModelType], + provider_schema: ProviderEntity, model_setting_map: dict[ModelType, dict[str, ModelSettings]], ) -> list[ModelWithProviderEntity]: """ Get custom provider models. 
:param model_types: model types - :param provider_instance: provider instance + :param provider_schema: provider schema :param model_setting_map: model setting map :return: """ @@ -897,8 +908,10 @@ class ProviderConfiguration(BaseModel): if model_type not in self.provider.supported_model_types: continue - models = provider_instance.models(model_type) - for m in models: + for m in provider_schema.models: + if m.model_type != model_type: + continue + status = ModelStatus.ACTIVE if credentials else ModelStatus.NO_CONFIGURE load_balancing_enabled = False if m.model_type in model_setting_map and m.model in model_setting_map[m.model_type]: @@ -930,10 +943,10 @@ class ProviderConfiguration(BaseModel): continue try: - custom_model_schema = provider_instance.get_model_instance( - model_configuration.model_type - ).get_customizable_model_schema_from_credentials( - model_configuration.model, model_configuration.credentials + custom_model_schema = self.get_model_schema( + model_type=model_configuration.model_type, + model=model_configuration.model, + credentials=model_configuration.credentials, ) except Exception as ex: logger.warning(f"get custom model schema failed, {ex}") @@ -1043,7 +1056,7 @@ class ProviderConfigurations(BaseModel): return iter(self.configurations) def values(self) -> Iterator[ProviderConfiguration]: - return self.configurations.values() + return iter(self.configurations.values()) def get(self, key, default=None): return self.configurations.get(key, default) @@ -1055,7 +1068,6 @@ class ProviderModelBundle(BaseModel): """ configuration: ProviderConfiguration - provider_instance: ModelProvider model_type_instance: AIModel # pydantic configs diff --git a/api/core/helper/moderation.py b/api/core/helper/moderation.py index b880590de2..f3144039e3 100644 --- a/api/core/helper/moderation.py +++ b/api/core/helper/moderation.py @@ -23,6 +23,9 @@ def check_moderation(model_config: ModelConfigWithCredentialsEntity, text: str) if using_provider_type == ProviderType.SYSTEM and provider_name in moderation_config.providers: hosting_openai_config = hosting_configuration.provider_map["openai"] + if hosting_openai_config.credentials is None: + return False + # 2000 text per chunk length = 2000 text_chunks = [text[i : i + length] for i in range(0, len(text), length)] diff --git a/api/core/model_runtime/entities/provider_entities.py b/api/core/model_runtime/entities/provider_entities.py index bfe861a97f..a6a7b67577 100644 --- a/api/core/model_runtime/entities/provider_entities.py +++ b/api/core/model_runtime/entities/provider_entities.py @@ -5,7 +5,7 @@ from typing import Optional from pydantic import BaseModel, ConfigDict from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import ModelType, ProviderModel +from core.model_runtime.entities.model_entities import AIModelEntity, ModelType class ConfigurateMethod(Enum): @@ -101,7 +101,7 @@ class SimpleProviderEntity(BaseModel): icon_small: Optional[I18nObject] = None icon_large: Optional[I18nObject] = None supported_model_types: Sequence[ModelType] - models: list[ProviderModel] = [] + models: list[AIModelEntity] = [] class ProviderHelpEntity(BaseModel): @@ -127,7 +127,7 @@ class ProviderEntity(BaseModel): help: Optional[ProviderHelpEntity] = None supported_model_types: Sequence[ModelType] configurate_methods: list[ConfigurateMethod] - models: list[ProviderModel] = [] + models: list[AIModelEntity] = [] provider_credential_schema: Optional[ProviderCredentialSchema] = None 
model_credential_schema: Optional[ModelCredentialSchema] = None diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py index 79a1d28ebe..6b04ba2efd 100644 --- a/api/core/model_runtime/model_providers/__base/ai_model.py +++ b/api/core/model_runtime/model_providers/__base/ai_model.py @@ -1,10 +1,9 @@ import decimal import os -from abc import ABC, abstractmethod from collections.abc import Mapping from typing import Optional -from pydantic import ConfigDict +from pydantic import BaseModel, ConfigDict, Field from core.helper.position_helper import get_position_map, sort_by_position_map from core.model_runtime.entities.common_entities import I18nObject @@ -20,34 +19,26 @@ from core.model_runtime.entities.model_entities import ( ) from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer +from core.plugin.entities.plugin_daemon import PluginModelProviderEntity from core.tools.utils.yaml_utils import load_yaml_file -class AIModel(ABC): +class AIModel(BaseModel): """ Base class for all models. """ - model_type: ModelType - model_schemas: Optional[list[AIModelEntity]] = None - started_at: float = 0 + tenant_id: str = Field(description="Tenant ID") + model_type: ModelType = Field(description="Model type") + plugin_id: str = Field(description="Plugin ID") + provider_name: str = Field(description="Provider") + plugin_model_provider: PluginModelProviderEntity = Field(description="Plugin model provider") + started_at: float = Field(description="Invoke start time", default=0) # pydantic configs model_config = ConfigDict(protected_namespaces=()) - @abstractmethod - def validate_credentials(self, model: str, credentials: Mapping) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - raise NotImplementedError - @property - @abstractmethod def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: """ Map model invoke error to unified error @@ -66,20 +57,18 @@ :param error: model invoke error :return: unified error """ - provider_name = self.__class__.__module__.split(".")[-3] - for invoke_error, model_errors in self._invoke_error_mapping.items(): if isinstance(error, tuple(model_errors)): if invoke_error == InvokeAuthorizationError: return invoke_error( description=( - f"[{provider_name}] Incorrect model credentials provided, please check and try again." + f"[{self.provider_name}] Incorrect model credentials provided, please check and try again." 
) ) - return invoke_error(description=f"[{provider_name}] {invoke_error.description}, {str(error)}") + return invoke_error(description=f"[{self.provider_name}] {invoke_error.description}, {str(error)}") - return InvokeError(description=f"[{provider_name}] Error: {str(error)}") + return InvokeError(description=f"[{self.provider_name}] Error: {str(error)}") def get_price(self, model: str, credentials: dict, price_type: PriceType, tokens: int) -> PriceInfo: """ diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py index ba88cc1f38..33dbce37c4 100644 --- a/api/core/model_runtime/model_providers/__base/large_language_model.py +++ b/api/core/model_runtime/model_providers/__base/large_language_model.py @@ -1,32 +1,25 @@ import logging import os -import re import time -from abc import abstractmethod -from collections.abc import Generator, Mapping +from collections.abc import Generator from typing import Optional, Union from pydantic import ConfigDict from core.model_runtime.callbacks.base_callback import Callback from core.model_runtime.callbacks.logging_callback import LoggingCallback -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMUsage from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, PromptMessage, - PromptMessageContentType, PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, ) from core.model_runtime.entities.model_entities import ( - ModelPropertyKey, ModelType, - ParameterRule, - ParameterType, PriceType, ) from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.plugin.manager.model import PluginModelManager logger = logging.getLogger(__name__) @@ -71,8 +64,6 @@ class LargeLanguageModel(AIModel): if model_parameters is None: model_parameters = {} - model_parameters = self._validate_and_filter_model_parameters(model, model_parameters, credentials) - self.started_at = time.perf_counter() callbacks = callbacks or [] @@ -94,20 +85,43 @@ class LargeLanguageModel(AIModel): ) try: - if "response_format" in model_parameters: - result = self._code_block_mode_wrapper( + plugin_model_manager = PluginModelManager() + result = plugin_model_manager.invoke_llm( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + model_parameters=model_parameters, + prompt_messages=prompt_messages, + tools=tools, + stop=stop, + stream=stream, + ) + + if not stream: + content = "" + content_list = [] + usage = LLMUsage.empty_usage() + system_fingerprint = None + for chunk in result: + if isinstance(chunk.delta.message.content, str): + content += chunk.delta.message.content + elif isinstance(chunk.delta.message.content, list): + content_list.extend(chunk.delta.message.content) + + usage = chunk.delta.usage or LLMUsage.empty_usage() + system_fingerprint = chunk.system_fingerprint + break + + result = LLMResult( model=model, - credentials=credentials, prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - callbacks=callbacks, + message=AssistantPromptMessage(content=content or content_list), + usage=usage, + system_fingerprint=system_fingerprint, ) - else: - result = self._invoke(model, credentials, prompt_messages, 
model_parameters, tools, stop, stream, user) except Exception as e: self._trigger_invoke_error_callbacks( model=model, @@ -122,6 +136,7 @@ class LargeLanguageModel(AIModel): callbacks=callbacks, ) + # TODO raise self._transform_invoke_error(e) if stream and isinstance(result, Generator): @@ -153,244 +168,6 @@ class LargeLanguageModel(AIModel): return result - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: Optional[list[Callback]] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper, ensure the response is a code block with output markdown quote - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :param callbacks: callbacks - :return: full response or stream response chunk generator result - """ - - block_prompts = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - - -{{instructions}} - -""" # noqa: E501 - - code_block = model_parameters.get("response_format", "") - if not code_block: - return self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - model_parameters.pop("response_format") - stop = stop or [] - stop.extend(["\n```", "```\n"]) - block_prompts = block_prompts.replace("{{block}}", code_block) - - # check if there is a system message - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - # override the system message - prompt_messages[0] = SystemPromptMessage( - content=block_prompts.replace("{{instructions}}", str(prompt_messages[0].content)) - ) - else: - # insert the system message - prompt_messages.insert( - 0, - SystemPromptMessage( - content=block_prompts.replace("{{instructions}}", f"Please output a valid {code_block} object.") - ), - ) - - if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage): - # add ```JSON\n to the last text message - if isinstance(prompt_messages[-1].content, str): - prompt_messages[-1].content += f"\n```{code_block}\n" - elif isinstance(prompt_messages[-1].content, list): - for i in range(len(prompt_messages[-1].content) - 1, -1, -1): - if prompt_messages[-1].content[i].type == PromptMessageContentType.TEXT: - prompt_messages[-1].content[i].data += f"\n```{code_block}\n" - break - else: - # append a user message - prompt_messages.append(UserPromptMessage(content=f"```{code_block}\n")) - - response = self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - if isinstance(response, Generator): - first_chunk = next(response) - - def new_generator(): - yield first_chunk - yield from response - - if first_chunk.delta.message.content and first_chunk.delta.message.content.startswith("`"): - return 
self._code_block_mode_stream_processor_with_backtick( - model=model, prompt_messages=prompt_messages, input_generator=new_generator() - ) - else: - return self._code_block_mode_stream_processor( - model=model, prompt_messages=prompt_messages, input_generator=new_generator() - ) - - return response - - def _code_block_mode_stream_processor( - self, model: str, prompt_messages: list[PromptMessage], input_generator: Generator[LLMResultChunk, None, None] - ) -> Generator[LLMResultChunk, None, None]: - """ - Code block mode stream processor, ensure the response is a code block with output markdown quote - - :param model: model name - :param prompt_messages: prompt messages - :param input_generator: input generator - :return: output generator - """ - state = "normal" - backtick_count = 0 - for piece in input_generator: - if piece.delta.message.content: - content = piece.delta.message.content - piece.delta.message.content = "" - yield piece - piece = content - else: - yield piece - continue - new_piece: str = "" - for char in piece: - char = str(char) - if state == "normal": - if char == "`": - state = "in_backticks" - backtick_count = 1 - else: - new_piece += char - elif state == "in_backticks": - if char == "`": - backtick_count += 1 - if backtick_count == 3: - state = "skip_content" - backtick_count = 0 - else: - new_piece += "`" * backtick_count + char - state = "normal" - backtick_count = 0 - elif state == "skip_content": - if char.isspace(): - state = "normal" - - if new_piece: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=new_piece, tool_calls=[]), - ), - ) - - def _code_block_mode_stream_processor_with_backtick( - self, model: str, prompt_messages: list, input_generator: Generator[LLMResultChunk, None, None] - ) -> Generator[LLMResultChunk, None, None]: - """ - Code block mode stream processor, ensure the response is a code block with output markdown quote. - This version skips the language identifier that follows the opening triple backticks. 
- - :param model: model name - :param prompt_messages: prompt messages - :param input_generator: input generator - :return: output generator - """ - state = "search_start" - backtick_count = 0 - - for piece in input_generator: - if piece.delta.message.content: - content = piece.delta.message.content - # Reset content to ensure we're only processing and yielding the relevant parts - piece.delta.message.content = "" - # Yield a piece with cleared content before processing it to maintain the generator structure - yield piece - piece = content - else: - # Yield pieces without content directly - yield piece - continue - - if state == "done": - continue - - new_piece: str = "" - for char in piece: - if state == "search_start": - if char == "`": - backtick_count += 1 - if backtick_count == 3: - state = "skip_language" - backtick_count = 0 - else: - backtick_count = 0 - elif state == "skip_language": - # Skip everything until the first newline, marking the end of the language identifier - if char == "\n": - state = "in_code_block" - elif state == "in_code_block": - if char == "`": - backtick_count += 1 - if backtick_count == 3: - state = "done" - break - else: - if backtick_count > 0: - # If backticks were counted but we're still collecting content, it was a false start - new_piece += "`" * backtick_count - backtick_count = 0 - new_piece += str(char) - - elif state == "done": - break - - if new_piece: - # Only yield content collected within the code block - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=new_piece, tool_calls=[]), - ), - ) - def _invoke_result_generator( self, model: str, @@ -462,34 +239,6 @@ if you are not sure about the structure. callbacks=callbacks, ) - @abstractmethod - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - raise NotImplementedError - - @abstractmethod def get_num_tokens( self, model: str, @@ -506,41 +255,18 @@ if you are not sure about the structure. 
:param tools: tools for tool calling :return: """ - raise NotImplementedError - - def enforce_stop_tokens(self, text: str, stop: list[str]) -> str: - """Cut off the text as soon as any stop words occur.""" - return re.split("|".join(stop), text, maxsplit=1)[0] - - def get_parameter_rules(self, model: str, credentials: dict) -> list[ParameterRule]: - """ - Get parameter rules - - :param model: model name - :param credentials: model credentials - :return: parameter rules - """ - model_schema = self.get_model_schema(model, credentials) - if model_schema: - return model_schema.parameter_rules - - return [] - - def get_model_mode(self, model: str, credentials: Optional[Mapping] = None) -> LLMMode: - """ - Get model mode - - :param model: model name - :param credentials: model credentials - :return: model mode - """ - model_schema = self.get_model_schema(model, credentials) - - mode = LLMMode.CHAT - if model_schema and model_schema.model_properties.get(ModelPropertyKey.MODE): - mode = LLMMode.value_of(model_schema.model_properties[ModelPropertyKey.MODE]) - - return mode + plugin_model_manager = PluginModelManager() + return plugin_model_manager.get_llm_num_tokens( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model_type=self.model_type.value, + model=model, + credentials=credentials, + prompt_messages=prompt_messages, + tools=tools, + ) def _calc_response_usage( self, model: str, credentials: dict, prompt_tokens: int, completion_tokens: int @@ -772,98 +498,3 @@ if you are not sure about the structure. raise e else: logger.warning(f"Callback {callback.__class__.__name__} on_invoke_error failed with error {e}") - - def _validate_and_filter_model_parameters(self, model: str, model_parameters: dict, credentials: dict) -> dict: - """ - Validate model parameters - - :param model: model name - :param model_parameters: model parameters - :param credentials: model credentials - :return: - """ - parameter_rules = self.get_parameter_rules(model, credentials) - - # validate model parameters - filtered_model_parameters = {} - for parameter_rule in parameter_rules: - parameter_name = parameter_rule.name - parameter_value = model_parameters.get(parameter_name) - if parameter_value is None: - if parameter_rule.use_template and parameter_rule.use_template in model_parameters: - # if parameter value is None, use template value variable name instead - parameter_value = model_parameters[parameter_rule.use_template] - else: - if parameter_rule.required: - if parameter_rule.default is not None: - filtered_model_parameters[parameter_name] = parameter_rule.default - continue - else: - raise ValueError(f"Model Parameter {parameter_name} is required.") - else: - continue - - # validate parameter value type - if parameter_rule.type == ParameterType.INT: - if not isinstance(parameter_value, int): - raise ValueError(f"Model Parameter {parameter_name} should be int.") - - # validate parameter value range - if parameter_rule.min is not None and parameter_value < parameter_rule.min: - raise ValueError( - f"Model Parameter {parameter_name} should be greater than or equal to {parameter_rule.min}." - ) - - if parameter_rule.max is not None and parameter_value > parameter_rule.max: - raise ValueError( - f"Model Parameter {parameter_name} should be less than or equal to {parameter_rule.max}." 
- ) - elif parameter_rule.type == ParameterType.FLOAT: - if not isinstance(parameter_value, float | int): - raise ValueError(f"Model Parameter {parameter_name} should be float.") - - # validate parameter value precision - if parameter_rule.precision is not None: - if parameter_rule.precision == 0: - if parameter_value != int(parameter_value): - raise ValueError(f"Model Parameter {parameter_name} should be int.") - else: - if parameter_value != round(parameter_value, parameter_rule.precision): - raise ValueError( - f"Model Parameter {parameter_name} should be round to {parameter_rule.precision}" - f" decimal places." - ) - - # validate parameter value range - if parameter_rule.min is not None and parameter_value < parameter_rule.min: - raise ValueError( - f"Model Parameter {parameter_name} should be greater than or equal to {parameter_rule.min}." - ) - - if parameter_rule.max is not None and parameter_value > parameter_rule.max: - raise ValueError( - f"Model Parameter {parameter_name} should be less than or equal to {parameter_rule.max}." - ) - elif parameter_rule.type == ParameterType.BOOLEAN: - if not isinstance(parameter_value, bool): - raise ValueError(f"Model Parameter {parameter_name} should be bool.") - elif parameter_rule.type == ParameterType.STRING: - if not isinstance(parameter_value, str): - raise ValueError(f"Model Parameter {parameter_name} should be string.") - - # validate options - if parameter_rule.options and parameter_value not in parameter_rule.options: - raise ValueError(f"Model Parameter {parameter_name} should be one of {parameter_rule.options}.") - elif parameter_rule.type == ParameterType.TEXT: - if not isinstance(parameter_value, str): - raise ValueError(f"Model Parameter {parameter_name} should be text.") - - # validate options - if parameter_rule.options and parameter_value not in parameter_rule.options: - raise ValueError(f"Model Parameter {parameter_name} should be one of {parameter_rule.options}.") - else: - raise ValueError(f"Model Parameter {parameter_name} type {parameter_rule.type} is not supported.") - - filtered_model_parameters[parameter_name] = parameter_value - - return filtered_model_parameters diff --git a/api/core/model_runtime/model_providers/__base/text_embedding_model.py b/api/core/model_runtime/model_providers/__base/text_embedding_model.py index a948dca20d..1a5c40ed51 100644 --- a/api/core/model_runtime/model_providers/__base/text_embedding_model.py +++ b/api/core/model_runtime/model_providers/__base/text_embedding_model.py @@ -8,6 +8,7 @@ from core.embedding.embedding_constant import EmbeddingInputType from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.plugin.manager.model import PluginModelManager class TextEmbeddingModel(AIModel): @@ -66,7 +67,6 @@ class TextEmbeddingModel(AIModel): """ raise NotImplementedError - @abstractmethod def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: """ Get number of tokens for given prompt messages @@ -76,7 +76,17 @@ class TextEmbeddingModel(AIModel): :param texts: texts to embed :return: """ - raise NotImplementedError + plugin_model_manager = PluginModelManager() + return plugin_model_manager.get_text_embedding_num_tokens( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model_type=self.model_type.value, + model=model, + 
credentials=credentials, + texts=texts, + ) def _get_context_size(self, model: str, credentials: dict) -> int: """ diff --git a/api/core/model_runtime/model_providers/__init__.py b/api/core/model_runtime/model_providers/__init__.py index 9d71013dbf..e69de29bb2 100644 --- a/api/core/model_runtime/model_providers/__init__.py +++ b/api/core/model_runtime/model_providers/__init__.py @@ -1,3 +0,0 @@ -from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory - -model_provider_factory = ModelProviderFactory() diff --git a/api/core/model_runtime/model_providers/anthropic/__init__.py b/api/core/model_runtime/model_providers/anthropic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/anthropic/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/anthropic/_assets/icon_l_en.svg deleted file mode 100644 index cace17da73..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/_assets/icon_l_en.svg +++ /dev/null @@ -1,78 +0,0 @@ -[78 lines of SVG markup not preserved in this rendering] diff --git a/api/core/model_runtime/model_providers/anthropic/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/anthropic/_assets/icon_s_en.svg deleted file mode 100644 index d852f04401..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/_assets/icon_s_en.svg +++ /dev/null @@ -1,4 +0,0 @@ -[4 lines of SVG markup not preserved in this rendering] diff --git a/api/core/model_runtime/model_providers/anthropic/anthropic.py b/api/core/model_runtime/model_providers/anthropic/anthropic.py deleted file mode 100644 index 5b12f04a3e..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/anthropic.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class AnthropicProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `claude-3-opus-20240229` model for validate, - model_instance.validate_credentials(model="claude-3-opus-20240229", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/anthropic/anthropic.yaml b/api/core/model_runtime/model_providers/anthropic/anthropic.yaml deleted file mode 100644 index cf41f544ef..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/anthropic.yaml +++ /dev/null @@ -1,39 +0,0 @@ -provider: anthropic -label: - en_US: Anthropic -description: - en_US: Anthropic’s powerful models, such as Claude 3. 
- zh_Hans: Anthropic 的强大模型,例如 Claude 3。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#F0F0EB" -help: - title: - en_US: Get your API Key from Anthropic - zh_Hans: 从 Anthropic 获取 API Key - url: - en_US: https://console.anthropic.com/account/keys -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: anthropic_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: anthropic_api_url - label: - en_US: API URL - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的 API URL - en_US: Enter your API URL diff --git a/api/core/model_runtime/model_providers/anthropic/llm/__init__.py b/api/core/model_runtime/model_providers/anthropic/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/anthropic/llm/_position.yaml b/api/core/model_runtime/model_providers/anthropic/llm/_position.yaml deleted file mode 100644 index 8394c4276a..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/_position.yaml +++ /dev/null @@ -1,8 +0,0 @@ -- claude-3-5-sonnet-20240620 -- claude-3-haiku-20240307 -- claude-3-opus-20240229 -- claude-3-sonnet-20240229 -- claude-2.1 -- claude-instant-1.2 -- claude-2 -- claude-instant-1 diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-2.1.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-2.1.yaml deleted file mode 100644 index 6707c34594..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-2.1.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: claude-2.1 -label: - en_US: claude-2.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '8.00' - output: '24.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-2.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-2.yaml deleted file mode 100644 index 1986947129..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-2.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: claude-2 -label: - en_US: claude-2 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '8.00' - output: '24.00' - unit: '0.000001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-5-sonnet-20240620.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-3-5-sonnet-20240620.yaml deleted file mode 100644 index e02c5517fe..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-5-sonnet-20240620.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: claude-3-5-sonnet-20240620 -label: - en_US: claude-3-5-sonnet-20240620 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format -pricing: - input: '3.00' - output: '15.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-haiku-20240307.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-3-haiku-20240307.yaml deleted file mode 100644 index cb2af1308a..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-haiku-20240307.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: claude-3-haiku-20240307 -label: - en_US: claude-3-haiku-20240307 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '0.25' - output: '1.25' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-opus-20240229.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-3-opus-20240229.yaml deleted file mode 100644 index 101f54c3f8..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-opus-20240229.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: claude-3-opus-20240229 -label: - en_US: claude-3-opus-20240229 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '15.00' - output: '75.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-sonnet-20240229.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-3-sonnet-20240229.yaml deleted file mode 100644 index daf55553f8..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-3-sonnet-20240229.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: claude-3-sonnet-20240229 -label: - en_US: claude-3-sonnet-20240229 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '3.00' - output: '15.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.2.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.2.yaml deleted file mode 100644 index ac69bbf4d2..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.2.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: claude-instant-1.2 -label: - en_US: claude-instant-1.2 -model_type: llm -features: [ ] -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '1.63' - output: '5.51' - unit: '0.000001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.yaml b/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.yaml deleted file mode 100644 index 5e76d5b1c2..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/claude-instant-1.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: claude-instant-1 -label: - en_US: claude-instant-1 -model_type: llm -features: [ ] -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '1.63' - output: '5.51' - unit: '0.000001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py deleted file mode 100644 index 46e1b415b8..0000000000 --- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py +++ /dev/null @@ -1,624 +0,0 @@ -import base64 -import io -import json -from collections.abc import Generator -from typing import Optional, Union, cast - -import anthropic -import requests -from anthropic import Anthropic, Stream -from anthropic.types import ( - ContentBlockDeltaEvent, - Message, - MessageDeltaEvent, - MessageStartEvent, - MessageStopEvent, - MessageStreamEvent, - completion_create_params, -) -from anthropic.types.beta.tools import ToolsBetaMessage -from httpx import Timeout -from PIL import Image - -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -ANTHROPIC_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. 
- - -{{instructions}} - -""" # noqa: E501 - - -class AnthropicLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke model - return self._chat_generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm chat model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # transform model parameters from completion api of anthropic to chat api - if "max_tokens_to_sample" in model_parameters: - model_parameters["max_tokens"] = model_parameters.pop("max_tokens_to_sample") - - # init model client - client = Anthropic(**credentials_kwargs) - - extra_model_kwargs = {} - if stop: - extra_model_kwargs["stop_sequences"] = stop - - if user: - extra_model_kwargs["metadata"] = completion_create_params.Metadata(user_id=user) - - system, prompt_message_dicts = self._convert_prompt_messages(prompt_messages) - - if system: - extra_model_kwargs["system"] = system - - # Add the new header for claude-3-5-sonnet-20240620 model - extra_headers = {} - if model == "claude-3-5-sonnet-20240620": - if model_parameters.get("max_tokens") > 4096: - extra_headers["anthropic-beta"] = "max-tokens-3-5-sonnet-2024-07-15" - - if tools: - extra_model_kwargs["tools"] = [self._transform_tool_prompt(tool) for tool in tools] - response = client.beta.tools.messages.create( - model=model, - messages=prompt_message_dicts, - stream=stream, - extra_headers=extra_headers, - **model_parameters, - **extra_model_kwargs, - ) - else: - # chat model - response = client.messages.create( - model=model, - messages=prompt_message_dicts, - stream=stream, - extra_headers=extra_headers, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_chat_generate_response(model, credentials, response, prompt_messages) - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: list[Callback] = 
None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - if model_parameters.get("response_format"): - stop = stop or [] - # chat model - self._transform_chat_json_prompts( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - response_format=model_parameters["response_format"], - ) - model_parameters.pop("response_format") - - return self._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _transform_tool_prompt(self, tool: PromptMessageTool) -> dict: - return {"name": tool.name, "description": tool.description, "input_schema": tool.parameters} - - def _transform_chat_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts - """ - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - - # check if there is a system message - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - # override the system message - prompt_messages[0] = SystemPromptMessage( - content=ANTHROPIC_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - else: - # insert the system message - prompt_messages.insert( - 0, - SystemPromptMessage( - content=ANTHROPIC_BLOCK_MODE_PROMPT.replace( - "{{instructions}}", f"Please output a valid {response_format} object." 
- ).replace("{{block}}", response_format) - ), - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - prompt = self._convert_messages_to_prompt_anthropic(prompt_messages) - - client = Anthropic(api_key="") - tokens = client.count_tokens(prompt) - - tool_call_inner_prompts_tokens_map = { - "claude-3-opus-20240229": 395, - "claude-3-haiku-20240307": 264, - "claude-3-sonnet-20240229": 159, - } - - if model in tool_call_inner_prompts_tokens_map and tools: - tokens += tool_call_inner_prompts_tokens_map[model] - - return tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=[ - UserPromptMessage(content="ping"), - ], - model_parameters={ - "temperature": 0, - "max_tokens": 20, - }, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: Union[Message, ToolsBetaMessage], - prompt_messages: list[PromptMessage], - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content="", tool_calls=[]) - - for content in response.content: - if content.type == "text": - assistant_prompt_message.content += content.text - elif content.type == "tool_use": - tool_call = AssistantPromptMessage.ToolCall( - id=content.id, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=content.name, arguments=json.dumps(content.input) - ), - ) - assistant_prompt_message.tool_calls.append(tool_call) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.input_tokens - completion_tokens = response.usage.output_tokens - else: - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=response.model, prompt_messages=prompt_messages, message=assistant_prompt_message, usage=usage - ) - - return response - - def _handle_chat_generate_stream_response( - self, model: str, credentials: dict, response: Stream[MessageStreamEvent], prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator - """ - full_assistant_content = "" - return_model = None - input_tokens = 0 - output_tokens = 0 - finish_reason = None - index = 0 - - tool_calls: 
list[AssistantPromptMessage.ToolCall] = [] - - for chunk in response: - if isinstance(chunk, MessageStartEvent): - if hasattr(chunk, "content_block"): - content_block = chunk.content_block - if isinstance(content_block, dict): - if content_block.get("type") == "tool_use": - tool_call = AssistantPromptMessage.ToolCall( - id=content_block.get("id"), - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=content_block.get("name"), arguments="" - ), - ) - tool_calls.append(tool_call) - elif hasattr(chunk, "delta"): - delta = chunk.delta - if isinstance(delta, dict) and len(tool_calls) > 0: - if delta.get("type") == "input_json_delta": - tool_calls[-1].function.arguments += delta.get("partial_json", "") - elif chunk.message: - return_model = chunk.message.model - input_tokens = chunk.message.usage.input_tokens - elif isinstance(chunk, MessageDeltaEvent): - output_tokens = chunk.usage.output_tokens - finish_reason = chunk.delta.stop_reason - elif isinstance(chunk, MessageStopEvent): - # transform usage - usage = self._calc_response_usage(model, credentials, input_tokens, output_tokens) - - # transform empty tool call arguments to {} - for tool_call in tool_calls: - if not tool_call.function.arguments: - tool_call.function.arguments = "{}" - - yield LLMResultChunk( - model=return_model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index + 1, - message=AssistantPromptMessage(content="", tool_calls=tool_calls), - finish_reason=finish_reason, - usage=usage, - ), - ) - elif isinstance(chunk, ContentBlockDeltaEvent): - chunk_text = chunk.delta.text or "" - full_assistant_content += chunk_text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=chunk_text) - - index = chunk.index - - yield LLMResultChunk( - model=return_model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk.index, - message=assistant_prompt_message, - ), - ) - - def _to_credential_kwargs(self, credentials: dict) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "api_key": credentials["anthropic_api_key"], - "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), - "max_retries": 1, - } - - if credentials.get("anthropic_api_url"): - credentials["anthropic_api_url"] = credentials["anthropic_api_url"].rstrip("/") - credentials_kwargs["base_url"] = credentials["anthropic_api_url"] - - return credentials_kwargs - - def _convert_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]: - """ - Convert prompt messages to dict list and system - """ - system = "" - first_loop = True - for message in prompt_messages: - if isinstance(message, SystemPromptMessage): - message.content = message.content.strip() - if first_loop: - system = message.content - first_loop = False - else: - system += "\n" - system += message.content - - prompt_message_dicts = [] - for message in prompt_messages: - if not isinstance(message, SystemPromptMessage): - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - prompt_message_dicts.append(message_dict) - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict 
= {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - if not message_content.data.startswith("data:"): - # fetch image data from url - try: - image_content = requests.get(message_content.data).content - with Image.open(io.BytesIO(image_content)) as img: - mime_type = f"image/{img.format.lower()}" - base64_data = base64.b64encode(image_content).decode("utf-8") - except Exception as ex: - raise ValueError( - f"Failed to fetch image data from url {message_content.data}, {ex}" - ) - else: - data_split = message_content.data.split(";base64,") - mime_type = data_split[0].replace("data:", "") - base64_data = data_split[1] - - if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}: - raise ValueError( - f"Unsupported image type {mime_type}, " - f"only support image/jpeg, image/png, image/gif, and image/webp" - ) - - sub_message_dict = { - "type": "image", - "source": {"type": "base64", "media_type": mime_type, "data": base64_data}, - } - sub_messages.append(sub_message_dict) - prompt_message_dicts.append({"role": "user", "content": sub_messages}) - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - content = [] - if message.tool_calls: - for tool_call in message.tool_calls: - content.append( - { - "type": "tool_use", - "id": tool_call.id, - "name": tool_call.function.name, - "input": json.loads(tool_call.function.arguments), - } - ) - if message.content: - content.append({"type": "text", "text": message.content}) - - if prompt_message_dicts[-1]["role"] == "assistant": - prompt_message_dicts[-1]["content"].extend(content) - else: - prompt_message_dicts.append({"role": "assistant", "content": content}) - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = { - "role": "user", - "content": [ - {"type": "tool_result", "tool_use_id": message.tool_call_id, "content": message.content} - ], - } - prompt_message_dicts.append(message_dict) - else: - raise ValueError(f"Got unknown type {message}") - - return system, prompt_message_dicts - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. 
- """ - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - content = message.content - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - if not isinstance(message.content, list): - message_text = f"{ai_prompt} {content}" - else: - message_text = "" - for sub_message in message.content: - if sub_message.type == PromptMessageContentType.TEXT: - message_text += f"{human_prompt} {sub_message.data}" - elif sub_message.type == PromptMessageContentType.IMAGE: - message_text += f"{human_prompt} [IMAGE]" - elif isinstance(message, AssistantPromptMessage): - if not isinstance(message.content, list): - message_text = f"{ai_prompt} {content}" - else: - message_text = "" - for sub_message in message.content: - if sub_message.type == PromptMessageContentType.TEXT: - message_text += f"{ai_prompt} {sub_message.data}" - elif sub_message.type == PromptMessageContentType.IMAGE: - message_text += f"{ai_prompt} [IMAGE]" - elif isinstance(message, SystemPromptMessage): - message_text = content - elif isinstance(message, ToolPromptMessage): - message_text = f"{human_prompt} {message.content}" - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _convert_messages_to_prompt_anthropic(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a full prompt for the Anthropic model - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - if not messages: - return "" - - messages = messages.copy() # don't mutate the original list - if not isinstance(messages[-1], AssistantPromptMessage): - messages.append(AssistantPromptMessage(content="")) - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [anthropic.APIConnectionError, anthropic.APITimeoutError], - InvokeServerUnavailableError: [anthropic.InternalServerError], - InvokeRateLimitError: [anthropic.RateLimitError], - InvokeAuthorizationError: [anthropic.AuthenticationError, anthropic.PermissionDeniedError], - InvokeBadRequestError: [ - anthropic.BadRequestError, - anthropic.NotFoundError, - anthropic.UnprocessableEntityError, - anthropic.APIError, - ], - } diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/__init__.py b/api/core/model_runtime/model_providers/azure_ai_studio/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_l_en.png deleted file mode 100644 index 4b941654a7..0000000000 Binary files a/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_s_en.png deleted file mode 100644 index ca3043dc8d..0000000000 Binary files a/api/core/model_runtime/model_providers/azure_ai_studio/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.py b/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.py deleted file mode 100644 index 75d21d1ce9..0000000000 --- a/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.py +++ /dev/null @@ -1,17 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class AzureAIStudioProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
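The validator body below is a bare `pass`: this provider only supports customizable models (see the YAML that follows), so credentials appear to be validated per model rather than at the provider level. For contrast, a provider-level check would typically wrap a probe call like this hedged sketch, where `_probe_endpoint` is hypothetical:

    from core.model_runtime.errors.validate import CredentialsValidateFailedError

    def validate_provider_credentials(self, credentials: dict) -> None:
        try:
            self._probe_endpoint(credentials)  # hypothetical connectivity check
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))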
- """ - pass diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.yaml b/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.yaml deleted file mode 100644 index 9e17ba0884..0000000000 --- a/api/core/model_runtime/model_providers/azure_ai_studio/azure_ai_studio.yaml +++ /dev/null @@ -1,65 +0,0 @@ -provider: azure_ai_studio -label: - zh_Hans: Azure AI Studio - en_US: Azure AI Studio -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -description: - en_US: Azure AI Studio - zh_Hans: Azure AI Studio -background: "#93c5fd" -help: - title: - en_US: How to deploy customized model on Azure AI Studio - zh_Hans: 如何在Azure AI Studio上的私有化部署的模型 - url: - en_US: https://learn.microsoft.com/en-us/azure/ai-studio/how-to/deploy-models - zh_Hans: https://learn.microsoft.com/zh-cn/azure/ai-studio/how-to/deploy-models -supported_model_types: - - llm - - rerank -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: endpoint - label: - en_US: Azure AI Studio Endpoint - type: text-input - required: true - placeholder: - zh_Hans: 请输入你的Azure AI Studio推理端点 - en_US: 'Enter your API Endpoint, eg: https://example.com' - - variable: api_key - required: true - label: - en_US: API Key - zh_Hans: API Key - type: secret-input - placeholder: - en_US: Enter your Azure AI Studio API Key - zh_Hans: 在此输入您的 Azure AI Studio API Key - show_on: - - variable: __model_type - value: llm - - variable: jwt_token - required: true - label: - en_US: JWT Token - zh_Hans: JWT令牌 - type: secret-input - placeholder: - en_US: Enter your Azure AI Studio JWT Token - zh_Hans: 在此输入您的 Azure AI Studio 推理 API Key - show_on: - - variable: __model_type - value: rerank diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/llm/__init__.py b/api/core/model_runtime/model_providers/azure_ai_studio/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py b/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py deleted file mode 100644 index 516ef8b295..0000000000 --- a/api/core/model_runtime/model_providers/azure_ai_studio/llm/llm.py +++ /dev/null @@ -1,334 +0,0 @@ -import logging -from collections.abc import Generator -from typing import Any, Optional, Union - -from azure.ai.inference import ChatCompletionsClient -from azure.ai.inference.models import StreamingChatCompletionsUpdate -from azure.core.credentials import AzureKeyCredential -from azure.core.exceptions import ( - ClientAuthenticationError, - DecodeError, - DeserializationError, - HttpResponseError, - ResourceExistsError, - ResourceModifiedError, - ResourceNotFoundError, - ResourceNotModifiedError, - SerializationError, - ServiceRequestError, - ServiceResponseError, -) - -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - I18nObject, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - 
InvokeError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - - -class AzureAIStudioLargeLanguageModel(LargeLanguageModel): - """ - Model class for Azure AI Studio large language model. - """ - - client: Any = None - - from azure.ai.inference.models import StreamingChatCompletionsUpdate - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - if not self.client: - endpoint = credentials.get("endpoint") - api_key = credentials.get("api_key") - self.client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(api_key)) - - messages = [{"role": msg.role.value, "content": msg.content} for msg in prompt_messages] - - payload = { - "messages": messages, - "max_tokens": model_parameters.get("max_tokens", 4096), - "temperature": model_parameters.get("temperature", 0), - "top_p": model_parameters.get("top_p", 1), - "stream": stream, - } - - if stop: - payload["stop"] = stop - - if tools: - payload["tools"] = [tool.model_dump() for tool in tools] - - try: - response = self.client.complete(**payload) - - if stream: - return self._handle_stream_response(response, model, prompt_messages) - else: - return self._handle_non_stream_response(response, model, prompt_messages, credentials) - except Exception as e: - raise self._transform_invoke_error(e) - - def _handle_stream_response(self, response, model: str, prompt_messages: list[PromptMessage]) -> Generator: - for chunk in response: - if isinstance(chunk, StreamingChatCompletionsUpdate): - if chunk.choices: - delta = chunk.choices[0].delta - if delta.content: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=delta.content, tool_calls=[]), - ), - ) - - def _handle_non_stream_response( - self, response, model: str, prompt_messages: list[PromptMessage], credentials: dict - ) -> LLMResult: - assistant_text = response.choices[0].message.content - assistant_prompt_message = AssistantPromptMessage(content=assistant_text) - usage = self._calc_response_usage( - model, credentials, response.usage.prompt_tokens, response.usage.completion_tokens - ) - result = LLMResult(model=model, prompt_messages=prompt_messages, message=assistant_prompt_message, usage=usage) - - if hasattr(response, "system_fingerprint"): - result.system_fingerprint = response.system_fingerprint - - return result - - def _invoke_result_generator( - self, - model: str, - result: Generator, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: 
Optional[list[Callback]] = None, - ) -> Generator: - """ - Invoke result generator - - :param result: result generator - :return: result generator - """ - callbacks = callbacks or [] - prompt_message = AssistantPromptMessage(content="") - usage = None - system_fingerprint = None - real_model = model - - try: - for chunk in result: - if isinstance(chunk, dict): - content = chunk["choices"][0]["message"]["content"] - usage = chunk["usage"] - chunk = LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=content, tool_calls=[]), - ), - system_fingerprint=chunk.get("system_fingerprint"), - ) - - yield chunk - - self._trigger_new_chunk_callbacks( - chunk=chunk, - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - callbacks=callbacks, - ) - - prompt_message.content += chunk.delta.message.content - real_model = chunk.model - if hasattr(chunk.delta, "usage"): - usage = chunk.delta.usage - - if chunk.system_fingerprint: - system_fingerprint = chunk.system_fingerprint - except Exception as e: - raise self._transform_invoke_error(e) - - self._trigger_after_invoke_callbacks( - model=model, - result=LLMResult( - model=real_model, - prompt_messages=prompt_messages, - message=prompt_message, - usage=usage or LLMUsage.empty_usage(), - system_fingerprint=system_fingerprint, - ), - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - callbacks=callbacks, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # Implement token counting logic here - # Might need to use a tokenizer specific to the Azure AI Studio model - return 0 - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - endpoint = credentials.get("endpoint") - api_key = credentials.get("api_key") - client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(api_key)) - client.get_model_info() - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
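Note that `get_num_tokens` above is a stub that always returns 0, as its own comment concedes. If an estimate were needed before a model-specific tokenizer exists, a crude character heuristic is a common stopgap; this is illustrative only, not part of the deleted code:

    def approx_num_tokens(prompt_messages) -> int:
        # rough ~4 characters/token heuristic; swap in a real tokenizer when available
        text = " ".join(str(m.content) for m in prompt_messages if m.content)
        return max(1, len(text) // 4)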
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - ServiceRequestError, - ], - InvokeServerUnavailableError: [ - ServiceResponseError, - ], - InvokeAuthorizationError: [ - ClientAuthenticationError, - ], - InvokeBadRequestError: [ - HttpResponseError, - DecodeError, - ResourceExistsError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceNotModifiedError, - SerializationError, - DeserializationError, - ], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - Used to define customizable model schema - """ - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ] - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - features=[], - model_properties={}, - parameter_rules=rules, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/rerank/__init__.py b/api/core/model_runtime/model_providers/azure_ai_studio/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py b/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py deleted file mode 100644 index 6ed7ab277c..0000000000 --- a/api/core/model_runtime/model_providers/azure_ai_studio/rerank/rerank.py +++ /dev/null @@ -1,164 +0,0 @@ -import json -import logging -import os -import ssl -import urllib.request -from typing import Optional - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - -logger = logging.getLogger(__name__) - - -class AzureRerankModel(RerankModel): - """ - Model class for Azure AI Studio rerank model. 
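The client below speaks a plain HTTPS contract: it POSTs a JSON body of the form {"inputs": <query>, "docs": [...]} with a Bearer token and expects a JSON array of {"score": float} objects aligned with `docs`. The same call as a standalone function, mirroring the deleted `_azure_rerank` (endpoint and api_key are placeholders):

    import json
    import urllib.request

    def azure_rerank_request(endpoint: str, api_key: str, query: str, docs: list[str]) -> list[dict]:
        body = json.dumps({"inputs": query, "docs": docs}).encode("utf-8")
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
        req = urllib.request.Request(endpoint, body, headers)
        with urllib.request.urlopen(req) as response:  # urllib.error.HTTPError raised on 4xx/5xx
            return json.loads(response.read())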
- """ - - def _allow_self_signed_https(self, allowed): - # bypass the server certificate verification on client side - if allowed and not os.environ.get("PYTHONHTTPSVERIFY", "") and getattr(ssl, "_create_unverified_context", None): - ssl._create_default_https_context = ssl._create_unverified_context - - def _azure_rerank(self, query_input: str, docs: list[str], endpoint: str, api_key: str): - # self._allow_self_signed_https(True) # Enable if using self-signed certificate - - data = {"inputs": query_input, "docs": docs} - - body = json.dumps(data).encode("utf-8") - headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"} - - req = urllib.request.Request(endpoint, body, headers) - - try: - with urllib.request.urlopen(req) as response: - result = response.read() - return json.loads(result) - except urllib.error.HTTPError as error: - logger.error(f"The request failed with status code: {error.code}") - logger.error(error.info()) - logger.error(error.read().decode("utf8", "ignore")) - raise - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n - :param user: unique user id - :return: rerank result - """ - try: - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - endpoint = credentials.get("endpoint") - api_key = credentials.get("jwt_token") - - if not endpoint or not api_key: - raise ValueError("Azure endpoint and API key must be provided in credentials") - - result = self._azure_rerank(query, docs, endpoint, api_key) - logger.info(f"Azure rerank result: {result}") - - rerank_documents = [] - for idx, (doc, score_dict) in enumerate(zip(docs, result)): - score = score_dict["score"] - rerank_document = RerankDocument(index=idx, text=doc, score=score) - - if score_threshold is None or score >= score_threshold: - rerank_documents.append(rerank_document) - - rerank_documents.sort(key=lambda x: x.score, reverse=True) - - if top_n: - rerank_documents = rerank_documents[:top_n] - - return RerankResult(model=model, docs=rerank_documents) - - except Exception as e: - logger.exception(f"Exception in Azure rerank: {e}") - raise - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. 
Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [urllib.error.URLError], - InvokeServerUnavailableError: [urllib.error.HTTPError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError, json.JSONDecodeError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.RERANK, - model_properties={}, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/azure_openai/__init__.py b/api/core/model_runtime/model_providers/azure_openai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_openai/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/azure_openai/_assets/icon_l_en.png deleted file mode 100644 index 0791a67911..0000000000 Binary files a/api/core/model_runtime/model_providers/azure_openai/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/azure_openai/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/azure_openai/_assets/icon_s_en.svg deleted file mode 100644 index df1f54f36e..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/azure_openai/_common.py b/api/core/model_runtime/model_providers/azure_openai/_common.py deleted file mode 100644 index 32a0269af4..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/_common.py +++ /dev/null @@ -1,42 +0,0 @@ -import openai -from httpx import Timeout - -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.model_providers.azure_openai._constant import AZURE_OPENAI_API_VERSION - - -class _CommonAzureOpenAI: - @staticmethod - def _to_credential_kwargs(credentials: dict) -> dict: - api_version = credentials.get("openai_api_version", AZURE_OPENAI_API_VERSION) - credentials_kwargs = { - "api_key": credentials["openai_api_key"], - "azure_endpoint": credentials["openai_api_base"], - "api_version": api_version, - "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), - "max_retries": 1, - } - - return credentials_kwargs - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], - InvokeServerUnavailableError: [openai.InternalServerError], - InvokeRateLimitError: [openai.RateLimitError], - InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], - InvokeBadRequestError: [ - 
openai.BadRequestError, - openai.NotFoundError, - openai.UnprocessableEntityError, - openai.APIError, - ], - } diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py deleted file mode 100644 index 0dada70cc5..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ /dev/null @@ -1,1262 +0,0 @@ -from pydantic import BaseModel - -from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE -from core.model_runtime.entities.llm_entities import LLMMode -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - I18nObject, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - PriceConfig, -) - -AZURE_OPENAI_API_VERSION = "2024-02-15-preview" - -AZURE_DEFAULT_PARAM_SEED_HELP = I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性," - "您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically," - " such that repeated requests with the same seed and parameters should return the same result." - " Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter" - " to monitor changes in the backend.", -) - - -def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule: - rule = ParameterRule( - name="max_tokens", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.MAX_TOKENS], - ) - rule.default = default - rule.min = min_val - rule.max = max_val - return rule - - -class AzureBaseModel(BaseModel): - base_model_name: str - entity: AIModelEntity - - -LLM_BASE_MODELS = [ - AzureBaseModel( - base_model_name="gpt-35-turbo", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 16385, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.0005, - output=0.0015, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-35-turbo-16k", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 16385, 
- }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=16385), - ], - pricing=PriceConfig( - input=0.003, - output=0.004, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-35-turbo-0125", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 16385, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.0005, - output=0.0015, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 8192, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=8192), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.03, - output=0.06, - unit=0.001, - currency="USD", - ), - ), - 
), - AzureBaseModel( - base_model_name="gpt-4-32k", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 32768, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=32768), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.06, - output=0.12, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4-0125-preview", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.01, - output=0.03, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4-1106-preview", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - 
ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.01, - output=0.03, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4o-mini", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=16384), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.150, - output=0.600, - unit=0.000001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4o-mini-2024-07-18", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - 
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=16384), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object", "json_schema"], - ), - ParameterRule( - name="json_schema", - label=I18nObject(en_US="JSON Schema"), - type="text", - help=I18nObject( - zh_Hans="设置返回的json schema,llm将按照它返回", - en_US="Set a response json schema will ensure LLM to adhere it.", - ), - required=False, - ), - ], - pricing=PriceConfig( - input=0.150, - output=0.600, - unit=0.000001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4o", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=5.00, - output=15.00, - unit=0.000001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4o-2024-05-13", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", 
- **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=5.00, - output=15.00, - unit=0.000001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4o-2024-08-06", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object", "json_schema"], - ), - ParameterRule( - name="json_schema", - label=I18nObject(en_US="JSON Schema"), - type="text", - help=I18nObject( - zh_Hans="设置返回的json schema,llm将按照它返回", - en_US="Set a response json schema will ensure LLM to adhere it.", - ), - required=False, - ), - ], - pricing=PriceConfig( - input=5.00, - output=15.00, - unit=0.000001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4-turbo", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, 
min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.01, - output=0.03, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4-turbo-2024-04-09", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ - ModelFeature.AGENT_THOUGHT, - ModelFeature.VISION, - ModelFeature.MULTI_TOOL_CALL, - ModelFeature.STREAM_TOOL_CALL, - ], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.01, - output=0.03, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-4-vision-preview", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - features=[ModelFeature.VISION], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.CHAT.value, - ModelPropertyKey.CONTEXT_SIZE: 128000, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ParameterRule( - name="seed", - label=I18nObject(zh_Hans="种子", en_US="Seed"), - type="int", - help=AZURE_DEFAULT_PARAM_SEED_HELP, - required=False, - precision=2, - min=0, - max=1, - ), - ParameterRule( - name="response_format", - label=I18nObject(zh_Hans="回复格式", en_US="response_format"), - type="string", - help=I18nObject( - zh_Hans="指定模型必须输出的格式", en_US="specifying the format that the model must output" - ), - required=False, - 
options=["text", "json_object"], - ), - ], - pricing=PriceConfig( - input=0.01, - output=0.03, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="gpt-35-turbo-instruct", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.COMPLETION.value, - ModelPropertyKey.CONTEXT_SIZE: 4096, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ], - pricing=PriceConfig( - input=0.0015, - output=0.002, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="text-davinci-003", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject( - en_US="fake-deployment-name-label", - ), - model_type=ModelType.LLM, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: LLMMode.COMPLETION.value, - ModelPropertyKey.CONTEXT_SIZE: 4096, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], - ), - ParameterRule( - name="top_p", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], - ), - ParameterRule( - name="presence_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], - ), - ParameterRule( - name="frequency_penalty", - **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], - ), - _get_max_tokens(default=512, min_val=1, max_val=4096), - ], - pricing=PriceConfig( - input=0.02, - output=0.02, - unit=0.001, - currency="USD", - ), - ), - ), -] - -EMBEDDING_BASE_MODELS = [ - AzureBaseModel( - base_model_name="text-embedding-ada-002", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: 8097, - ModelPropertyKey.MAX_CHUNKS: 32, - }, - pricing=PriceConfig( - input=0.0001, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="text-embedding-3-small", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: 8191, - ModelPropertyKey.MAX_CHUNKS: 32, - }, - pricing=PriceConfig( - input=0.00002, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="text-embedding-3-large", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: 8191, - ModelPropertyKey.MAX_CHUNKS: 32, - }, - pricing=PriceConfig( - input=0.00013, - unit=0.001, - currency="USD", - ), - ), - ), -] -SPEECH2TEXT_BASE_MODELS = [ - AzureBaseModel( - base_model_name="whisper-1", - 
entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.SPEECH2TEXT, - model_properties={ - ModelPropertyKey.FILE_UPLOAD_LIMIT: 25, - ModelPropertyKey.SUPPORTED_FILE_EXTENSIONS: "flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm", - }, - ), - ) -] -TTS_BASE_MODELS = [ - AzureBaseModel( - base_model_name="tts-1", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TTS, - model_properties={ - ModelPropertyKey.DEFAULT_VOICE: "alloy", - ModelPropertyKey.VOICES: [ - { - "mode": "alloy", - "name": "Alloy", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "echo", - "name": "Echo", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "fable", - "name": "Fable", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "onyx", - "name": "Onyx", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "nova", - "name": "Nova", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "shimmer", - "name": "Shimmer", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - ], - ModelPropertyKey.WORD_LIMIT: 120, - ModelPropertyKey.AUDIO_TYPE: "mp3", - ModelPropertyKey.MAX_WORKERS: 5, - }, - pricing=PriceConfig( - input=0.015, - unit=0.001, - currency="USD", - ), - ), - ), - AzureBaseModel( - base_model_name="tts-1-hd", - entity=AIModelEntity( - model="fake-deployment-name", - label=I18nObject(en_US="fake-deployment-name-label"), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TTS, - model_properties={ - ModelPropertyKey.DEFAULT_VOICE: "alloy", - ModelPropertyKey.VOICES: [ - { - "mode": "alloy", - "name": "Alloy", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "echo", - "name": "Echo", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "fable", - "name": "Fable", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "onyx", - "name": "Onyx", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "nova", - "name": "Nova", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - { - "mode": "shimmer", - "name": "Shimmer", - "language": ["zh-Hans", "en-US", "de-DE", "fr-FR", "es-ES", "it-IT", "th-TH", "id-ID"], - }, - ], - ModelPropertyKey.WORD_LIMIT: 120, - ModelPropertyKey.AUDIO_TYPE: "mp3", - ModelPropertyKey.MAX_WORKERS: 5, - }, - pricing=PriceConfig( - input=0.03, - unit=0.001, - currency="USD", - ), - ), - ), -] diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.py b/api/core/model_runtime/model_providers/azure_openai/azure_openai.py deleted file mode 100644 index 2e3c6aab05..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class 
AzureOpenAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml deleted file mode 100644 index 867f9fec42..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ /dev/null @@ -1,227 +0,0 @@ -provider: azure_openai -label: - en_US: Azure OpenAI Service Model -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.png -background: "#E3F0FF" -help: - title: - en_US: Get your API key from Azure - zh_Hans: 从 Azure 获取 API Key - url: - en_US: https://azure.microsoft.com/en-us/products/ai-services/openai-service -supported_model_types: - - llm - - text-embedding - - speech2text - - tts -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Deployment Name - zh_Hans: 部署名称 - placeholder: - en_US: Enter your Deployment Name here, matching the Azure deployment name. - zh_Hans: 在此输入您的部署名称,与 Azure 部署名称匹配。 - credential_form_schemas: - - variable: openai_api_base - label: - en_US: API Endpoint URL - zh_Hans: API 域名 - type: text-input - required: true - placeholder: - zh_Hans: '在此输入您的 API 域名,如:https://example.com/xxx' - en_US: 'Enter your API Endpoint, eg: https://example.com/xxx' - - variable: openai_api_key - label: - en_US: API Key - zh_Hans: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API key here - - variable: openai_api_version - label: - zh_Hans: API 版本 - en_US: API Version - type: select - required: true - options: - - label: - en_US: 2024-08-01-preview - value: 2024-08-01-preview - - label: - en_US: 2024-07-01-preview - value: 2024-07-01-preview - - label: - en_US: 2024-05-01-preview - value: 2024-05-01-preview - - label: - en_US: 2024-04-01-preview - value: 2024-04-01-preview - - label: - en_US: 2024-03-01-preview - value: 2024-03-01-preview - - label: - en_US: 2024-02-15-preview - value: 2024-02-15-preview - - label: - en_US: 2023-12-01-preview - value: 2023-12-01-preview - - label: - en_US: '2024-02-01' - value: '2024-02-01' - - label: - en_US: '2024-06-01' - value: '2024-06-01' - placeholder: - zh_Hans: 在此选择您的 API 版本 - en_US: Select your API Version here - - variable: base_model_name - label: - en_US: Base Model - zh_Hans: 基础模型 - type: select - required: true - options: - - label: - en_US: gpt-35-turbo - value: gpt-35-turbo - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-35-turbo-0125 - value: gpt-35-turbo-0125 - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-35-turbo-16k - value: gpt-35-turbo-16k - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4 - value: gpt-4 - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4-32k - value: gpt-4-32k - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4o-mini - value: gpt-4o-mini - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4o-mini-2024-07-18 - value: gpt-4o-mini-2024-07-18 - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4o - value: gpt-4o - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4o-2024-05-13 - value: gpt-4o-2024-05-13 - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4o-2024-08-06 - value: gpt-4o-2024-08-06 - show_on: - - variable: __model_type - value: llm - - 
label: - en_US: gpt-4-turbo - value: gpt-4-turbo - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4-turbo-2024-04-09 - value: gpt-4-turbo-2024-04-09 - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4-0125-preview - value: gpt-4-0125-preview - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4-1106-preview - value: gpt-4-1106-preview - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-4-vision-preview - value: gpt-4-vision-preview - show_on: - - variable: __model_type - value: llm - - label: - en_US: gpt-35-turbo-instruct - value: gpt-35-turbo-instruct - show_on: - - variable: __model_type - value: llm - - label: - en_US: text-embedding-ada-002 - value: text-embedding-ada-002 - show_on: - - variable: __model_type - value: text-embedding - - label: - en_US: text-embedding-3-small - value: text-embedding-3-small - show_on: - - variable: __model_type - value: text-embedding - - label: - en_US: text-embedding-3-large - value: text-embedding-3-large - show_on: - - variable: __model_type - value: text-embedding - - label: - en_US: whisper-1 - value: whisper-1 - show_on: - - variable: __model_type - value: speech2text - - label: - en_US: tts-1 - value: tts-1 - show_on: - - variable: __model_type - value: tts - - label: - en_US: tts-1-hd - value: tts-1-hd - show_on: - - variable: __model_type - value: tts - placeholder: - zh_Hans: 在此输入您的模型版本 - en_US: Enter your model version diff --git a/api/core/model_runtime/model_providers/azure_openai/llm/__init__.py b/api/core/model_runtime/model_providers/azure_openai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py deleted file mode 100644 index f0033ea051..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py +++ /dev/null @@ -1,665 +0,0 @@ -import copy -import json -import logging -from collections.abc import Generator, Sequence -from typing import Optional, Union, cast - -import tiktoken -from openai import AzureOpenAI, Stream -from openai.types import Completion -from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall - -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageFunction, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.azure_openai._common import _CommonAzureOpenAI -from core.model_runtime.model_providers.azure_openai._constant import LLM_BASE_MODELS -from core.model_runtime.utils import helper - -logger = logging.getLogger(__name__) - - -class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: 
Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - base_model_name = credentials.get("base_model_name") - if not base_model_name: - raise ValueError("Base Model Name is required") - ai_model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model) - - if ai_model_entity and ai_model_entity.entity.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT.value: - # chat model - return self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - else: - # text completion model - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - base_model_name = credentials.get("base_model_name") - if not base_model_name: - raise ValueError("Base Model Name is required") - model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model) - if not model_entity: - raise ValueError(f"Base Model Name {base_model_name} is invalid") - model_mode = model_entity.entity.model_properties.get(ModelPropertyKey.MODE) - - if model_mode == LLMMode.CHAT.value: - # chat model - return self._num_tokens_from_messages(credentials, prompt_messages, tools) - else: - # text completion model, do not support tool calling - content = prompt_messages[0].content - assert isinstance(content, str) - return self._num_tokens_from_string(credentials, content) - - def validate_credentials(self, model: str, credentials: dict) -> None: - if "openai_api_base" not in credentials: - raise CredentialsValidateFailedError("Azure OpenAI API Base Endpoint is required") - - if "openai_api_key" not in credentials: - raise CredentialsValidateFailedError("Azure OpenAI API key is required") - - if "base_model_name" not in credentials: - raise CredentialsValidateFailedError("Base Model Name is required") - - base_model_name = credentials.get("base_model_name") - if not base_model_name: - raise CredentialsValidateFailedError("Base Model Name is required") - ai_model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model) - - if not ai_model_entity: - raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid') - - try: - client = AzureOpenAI(**self._to_credential_kwargs(credentials)) - - if ai_model_entity.entity.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT.value: - # chat model - client.chat.completions.create( - messages=[{"role": "user", "content": "ping"}], - model=model, - temperature=0, - max_tokens=20, - stream=False, - ) - else: - # text completion model - client.completions.create( - prompt="ping", - model=model, - temperature=0, - max_tokens=20, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - base_model_name = credentials.get("base_model_name") - if not base_model_name: - raise ValueError("Base Model Name is required") - ai_model_entity = self._get_ai_model_entity(base_model_name=base_model_name, model=model) - return 
ai_model_entity.entity if ai_model_entity else None - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - client = AzureOpenAI(**self._to_credential_kwargs(credentials)) - - extra_model_kwargs = {} - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - # text completion model - response = client.completions.create( - prompt=prompt_messages[0].content, model=model, stream=stream, **model_parameters, **extra_model_kwargs - ) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: Completion, prompt_messages: list[PromptMessage] - ): - assistant_text = response.choices[0].text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_text) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - content = prompt_messages[0].content - assert isinstance(content, str) - prompt_tokens = self._num_tokens_from_string(credentials, content) - completion_tokens = self._num_tokens_from_string(credentials, assistant_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=response.model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: Stream[Completion], prompt_messages: list[PromptMessage] - ) -> Generator: - full_text = "" - for chunk in response: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.text is None or delta.text == ""): - continue - - # transform assistant message to prompt message - text = delta.text or "" - assistant_prompt_message = AssistantPromptMessage(content=text) - - full_text += text - - if delta.finish_reason is not None: - # calculate num tokens - if chunk.usage: - # transform usage - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - else: - # calculate num tokens - content = prompt_messages[0].content - assert isinstance(content, str) - prompt_tokens = self._num_tokens_from_string(credentials, content) - completion_tokens = self._num_tokens_from_string(credentials, full_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - 
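Note: throughout the removed llm.py above, token usage is taken from the API response when present and otherwise re-estimated locally with tiktoken, and cost then follows the PriceConfig entries (price per token = listed price x unit, e.g. gpt-4o input at 5.00 x 0.000001 USD per token). A minimal self-contained sketch of that fallback; count_usage and estimate_cost are illustrative names, not Dify APIs:

import tiktoken

# Prefer the API-reported (prompt_tokens, completion_tokens) pair when
# available, otherwise encode the text locally, mirroring the module above.
def count_usage(base_model_name, prompt_text, completion_text, reported=None):
    if reported is not None:
        return reported
    try:
        encoding = tiktoken.encoding_for_model(base_model_name)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")  # same fallback as above
    return len(encoding.encode(prompt_text)), len(encoding.encode(completion_text))

# Price per token is the listed price times the unit, e.g. gpt-4o:
# input 5.00 * 0.000001 = 5 USD per million tokens, output 15 USD per million.
def estimate_cost(prompt_tokens, completion_tokens,
                  input_price=5.00, output_price=15.00, unit=0.000001):
    return (prompt_tokens * input_price + completion_tokens * output_price) * unit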
- def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - client = AzureOpenAI(**self._to_credential_kwargs(credentials)) - - response_format = model_parameters.get("response_format") - if response_format: - if response_format == "json_schema": - json_schema = model_parameters.get("json_schema") - if not json_schema: - raise ValueError("Must define JSON Schema when the response format is json_schema") - try: - schema = json.loads(json_schema) - except: - raise ValueError(f"not correct json_schema format: {json_schema}") - model_parameters.pop("json_schema") - model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema} - else: - model_parameters["response_format"] = {"type": response_format} - - extra_model_kwargs = {} - - if tools: - extra_model_kwargs["tools"] = [helper.dump_model(PromptMessageFunction(function=tool)) for tool in tools] - # extra_model_kwargs['functions'] = [{ - # "name": tool.name, - # "description": tool.description, - # "parameters": tool.parameters - # } for tool in tools] - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - # chat model - messages = [self._convert_prompt_message_to_dict(m) for m in prompt_messages] - response = client.chat.completions.create( - messages=messages, - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) - - return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: ChatCompletion, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ): - assistant_message = response.choices[0].message - assistant_message_tool_calls = assistant_message.tool_calls - - # extract tool calls from response - tool_calls = [] - self._update_tool_calls(tool_calls=tool_calls, tool_calls_response=assistant_message_tool_calls) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(credentials, prompt_messages, tools) - completion_tokens = self._num_tokens_from_messages(credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=response.model or model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return result - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Stream[ChatCompletionChunk], - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ): - index = 0 - full_assistant_content = "" - real_model = model - system_fingerprint = None - completion = 
"" - tool_calls = [] - for chunk in response: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - # NOTE: For fix https://github.com/langgenius/dify/issues/5790 - if delta.delta is None: - continue - - # extract tool calls from response - self._update_tool_calls(tool_calls=tool_calls, tool_calls_response=delta.delta.tool_calls) - - # Handling exceptions when content filters' streaming mode is set to asynchronous modified filter - if delta.finish_reason is None and not delta.delta.content: - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls) - - full_assistant_content += delta.delta.content or "" - - real_model = chunk.model - system_fingerprint = chunk.system_fingerprint - completion += delta.delta.content or "" - - yield LLMResultChunk( - model=real_model, - prompt_messages=prompt_messages, - system_fingerprint=system_fingerprint, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ), - ) - - index += 1 - - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(credentials, prompt_messages, tools) - - full_assistant_prompt_message = AssistantPromptMessage(content=completion) - completion_tokens = self._num_tokens_from_messages(credentials, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=real_model, - prompt_messages=prompt_messages, - system_fingerprint=system_fingerprint, - delta=LLMResultChunkDelta( - index=index, message=AssistantPromptMessage(content=""), finish_reason="stop", usage=usage - ), - ) - - @staticmethod - def _update_tool_calls( - tool_calls: list[AssistantPromptMessage.ToolCall], - tool_calls_response: Optional[Sequence[ChatCompletionMessageToolCall | ChoiceDeltaToolCall]], - ) -> None: - if tool_calls_response: - for response_tool_call in tool_calls_response: - if isinstance(response_tool_call, ChatCompletionMessageToolCall): - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - elif isinstance(response_tool_call, ChoiceDeltaToolCall): - index = response_tool_call.index - if index < len(tool_calls): - tool_calls[index].id = response_tool_call.id or tool_calls[index].id - tool_calls[index].type = response_tool_call.type or tool_calls[index].type - if response_tool_call.function: - tool_calls[index].function.name = ( - response_tool_call.function.name or tool_calls[index].function.name - ) - tool_calls[index].function.arguments += response_tool_call.function.arguments or "" - else: - assert response_tool_call.id is not None - assert response_tool_call.type is not None - assert response_tool_call.function is not None - assert response_tool_call.function.name is not None - assert response_tool_call.function.arguments is not None - - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - - @staticmethod - def _convert_prompt_message_to_dict(message: 
PromptMessage): - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - assert message.content is not None - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - # message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - message_dict["tool_calls"] = [helper.dump_model(tool_call) for tool_call in message.tool_calls] - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = { - "role": "tool", - "name": message.name, - "content": message.content, - "tool_call_id": message.tool_call_id, - } - else: - raise ValueError(f"Got unknown type {message}") - - if message.name: - message_dict["name"] = message.name - - return message_dict - - def _num_tokens_from_string( - self, credentials: dict, text: str, tools: Optional[list[PromptMessageTool]] = None - ) -> int: - try: - encoding = tiktoken.encoding_for_model(credentials["base_model_name"]) - except KeyError: - encoding = tiktoken.get_encoding("cl100k_base") - - num_tokens = len(encoding.encode(text)) - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - def _num_tokens_from_messages( - self, credentials: dict, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. - - Official documentation: https://github.com/openai/openai-cookbook/blob/ - main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" - model = credentials["base_model_name"] - try: - encoding = tiktoken.encoding_for_model(model) - except KeyError: - logger.warning("Warning: model not found. Using cl100k_base encoding.") - model = "cl100k_base" - encoding = tiktoken.get_encoding(model) - - if model.startswith("gpt-35-turbo-0301"): - # every message follows {role/name}\n{content}\n - tokens_per_message = 4 - # if there's a name, the role is omitted - tokens_per_name = -1 - elif model.startswith("gpt-35-turbo") or model.startswith("gpt-4"): - tokens_per_message = 3 - tokens_per_name = 1 - else: - raise NotImplementedError( - f"get_num_tokens_from_messages() is not presently implemented " - f"for model {model}." - "See https://github.com/openai/openai-python/blob/main/chatml.md for " - "information on how messages are converted to tokens." 
- ) - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - assert isinstance(tool_call, dict) - for t_key, t_value in tool_call.items(): - num_tokens += len(encoding.encode(t_key)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(encoding.encode(f_key)) - num_tokens += len(encoding.encode(f_value)) - else: - num_tokens += len(encoding.encode(t_key)) - num_tokens += len(encoding.encode(t_value)) - else: - num_tokens += len(encoding.encode(str(value))) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - @staticmethod - def _num_tokens_for_tools(encoding: tiktoken.Encoding, tools: list[PromptMessageTool]) -> int: - num_tokens = 0 - for tool in tools: - num_tokens += len(encoding.encode("type")) - num_tokens += len(encoding.encode("function")) - - # calculate num tokens for function object - num_tokens += len(encoding.encode("name")) - num_tokens += len(encoding.encode(tool.name)) - num_tokens += len(encoding.encode("description")) - num_tokens += len(encoding.encode(tool.description)) - parameters = tool.parameters - num_tokens += len(encoding.encode("parameters")) - if "title" in parameters: - num_tokens += len(encoding.encode("title")) - num_tokens += len(encoding.encode(parameters["title"])) - num_tokens += len(encoding.encode("type")) - num_tokens += len(encoding.encode(parameters["type"])) - if "properties" in parameters: - num_tokens += len(encoding.encode("properties")) - for key, value in parameters["properties"].items(): - num_tokens += len(encoding.encode(key)) - for field_key, field_value in value.items(): - num_tokens += len(encoding.encode(field_key)) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += len(encoding.encode(enum_field)) - else: - num_tokens += len(encoding.encode(field_key)) - num_tokens += len(encoding.encode(str(field_value))) - if "required" in parameters: - num_tokens += len(encoding.encode("required")) - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += len(encoding.encode(required_field)) - - return num_tokens - - @staticmethod - def _get_ai_model_entity(base_model_name: str, model: str): - for ai_model_entity in LLM_BASE_MODELS: - if ai_model_entity.base_model_name == base_model_name: - ai_model_entity_copy = copy.deepcopy(ai_model_entity) - ai_model_entity_copy.entity.model = model - ai_model_entity_copy.entity.label.en_US = model - ai_model_entity_copy.entity.label.zh_Hans = model - return ai_model_entity_copy diff --git a/api/core/model_runtime/model_providers/azure_openai/speech2text/__init__.py b/api/core/model_runtime/model_providers/azure_openai/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff 
--git a/api/core/model_runtime/model_providers/azure_openai/speech2text/speech2text.py b/api/core/model_runtime/model_providers/azure_openai/speech2text/speech2text.py deleted file mode 100644 index a2b14cf3db..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/speech2text/speech2text.py +++ /dev/null @@ -1,79 +0,0 @@ -import copy -from typing import IO, Optional - -from openai import AzureOpenAI - -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel -from core.model_runtime.model_providers.azure_openai._common import _CommonAzureOpenAI -from core.model_runtime.model_providers.azure_openai._constant import SPEECH2TEXT_BASE_MODELS, AzureBaseModel - - -class AzureOpenAISpeech2TextModel(_CommonAzureOpenAI, Speech2TextModel): - """ - Model class for OpenAI Speech to text model. - """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - return self._speech2text_invoke(model, credentials, file) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - audio_file_path = self._get_demo_file_path() - - with open(audio_file_path, "rb") as audio_file: - self._speech2text_invoke(model, credentials, audio_file) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _speech2text_invoke(self, model: str, credentials: dict, file: IO[bytes]) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :return: text for given audio file - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # init model client - client = AzureOpenAI(**credentials_kwargs) - - response = client.audio.transcriptions.create(model=model, file=file) - - return response.text - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - ai_model_entity = self._get_ai_model_entity(credentials["base_model_name"], model) - return ai_model_entity.entity - - @staticmethod - def _get_ai_model_entity(base_model_name: str, model: str) -> AzureBaseModel: - for ai_model_entity in SPEECH2TEXT_BASE_MODELS: - if ai_model_entity.base_model_name == base_model_name: - ai_model_entity_copy = copy.deepcopy(ai_model_entity) - ai_model_entity_copy.entity.model = model - ai_model_entity_copy.entity.label.en_US = model - ai_model_entity_copy.entity.label.zh_Hans = model - return ai_model_entity_copy - - return None diff --git a/api/core/model_runtime/model_providers/azure_openai/text_embedding/__init__.py b/api/core/model_runtime/model_providers/azure_openai/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_openai/tts/__init__.py b/api/core/model_runtime/model_providers/azure_openai/tts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py 
b/api/core/model_runtime/model_providers/azure_openai/tts/tts.py deleted file mode 100644 index af178703a0..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/tts/tts.py +++ /dev/null @@ -1,128 +0,0 @@ -import concurrent.futures -import copy -from typing import Optional - -from openai import AzureOpenAI - -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.invoke import InvokeBadRequestError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.tts_model import TTSModel -from core.model_runtime.model_providers.azure_openai._common import _CommonAzureOpenAI -from core.model_runtime.model_providers.azure_openai._constant import TTS_BASE_MODELS, AzureBaseModel - - -class AzureOpenAIText2SpeechModel(_CommonAzureOpenAI, TTSModel): - """ - Model class for OpenAI Speech to text model. - """ - - def _invoke( - self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None - ) -> any: - """ - _invoke text2speech model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :param user: unique user id - :return: text translated to audio file - """ - if not voice or voice not in [ - d["value"] for d in self.get_tts_model_voices(model=model, credentials=credentials) - ]: - voice = self._get_model_default_voice(model, credentials) - - return self._tts_invoke_streaming(model=model, credentials=credentials, content_text=content_text, voice=voice) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - validate credentials text2speech model - - :param model: model name - :param credentials: model credentials - :return: text translated to audio file - """ - try: - self._tts_invoke_streaming( - model=model, - credentials=credentials, - content_text="Hello Dify!", - voice=self._get_model_default_voice(model, credentials), - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any: - """ - _tts_invoke_streaming text2speech model - :param model: model name - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :return: text translated to audio file - """ - try: - # doc: https://platform.openai.com/docs/guides/text-to-speech - credentials_kwargs = self._to_credential_kwargs(credentials) - client = AzureOpenAI(**credentials_kwargs) - # max length is 4096 characters, there is 3500 limit for each request - max_length = 3500 - if len(content_text) > max_length: - sentences = self._split_text_into_sentences(content_text, max_length=max_length) - executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) - futures = [ - executor.submit( - client.audio.speech.with_streaming_response.create, - model=model, - response_format="mp3", - input=sentences[i], - voice=voice, - ) - for i in range(len(sentences)) - ] - for future in futures: - yield from future.result().__enter__().iter_bytes(1024) # noqa:PLC2801 - - else: - response = client.audio.speech.with_streaming_response.create( - model=model, voice=voice, response_format="mp3", input=content_text.strip() - ) - - yield from response.__enter__().iter_bytes(1024) # noqa:PLC2801 - except Exception as ex: - raise 
InvokeBadRequestError(str(ex)) - - def _process_sentence(self, sentence: str, model: str, voice, credentials: dict): - """ - _tts_invoke openai text2speech model api - - :param model: model name - :param credentials: model credentials - :param voice: model timbre - :param sentence: text content to be translated - :return: text translated to audio file - """ - credentials_kwargs = self._to_credential_kwargs(credentials) - client = AzureOpenAI(**credentials_kwargs) - response = client.audio.speech.create(model=model, voice=voice, input=sentence.strip()) - if isinstance(response.read(), bytes): - return response.read() - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - ai_model_entity = self._get_ai_model_entity(credentials["base_model_name"], model) - return ai_model_entity.entity - - @staticmethod - def _get_ai_model_entity(base_model_name: str, model: str) -> AzureBaseModel | None: - for ai_model_entity in TTS_BASE_MODELS: - if ai_model_entity.base_model_name == base_model_name: - ai_model_entity_copy = copy.deepcopy(ai_model_entity) - ai_model_entity_copy.entity.model = model - ai_model_entity_copy.entity.label.en_US = model - ai_model_entity_copy.entity.label.zh_Hans = model - return ai_model_entity_copy - return None diff --git a/api/core/model_runtime/model_providers/baichuan/__init__.py b/api/core/model_runtime/model_providers/baichuan/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/baichuan/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/baichuan/_assets/icon_l_en.svg deleted file mode 100644 index 7ff6b5a67a..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/_assets/icon_l_en.svg +++ /dev/null @@ -1,19 +0,0 @@ -[19 lines of SVG markup omitted] diff --git a/api/core/model_runtime/model_providers/baichuan/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/baichuan/_assets/icon_s_en.svg deleted file mode 100644 index 4ddcd26726..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/_assets/icon_s_en.svg +++ /dev/null @@ -1,11 +0,0 @@ -[11 lines of SVG markup omitted] diff --git a/api/core/model_runtime/model_providers/baichuan/baichuan.py b/api/core/model_runtime/model_providers/baichuan/baichuan.py deleted file mode 100644 index 626fc811cf..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/baichuan.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class BaichuanProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `baichuan2-turbo` model for validate, - model_instance.validate_credentials(model="baichuan2-turbo", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/baichuan/baichuan.yaml b/api/core/model_runtime/model_providers/baichuan/baichuan.yaml deleted file mode 100644 index 81e6e36215..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/baichuan.yaml +++ /dev/null @@ -1,29 +0,0 @@ -provider: baichuan -label: - en_US: Baichuan -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FFF6F2" -help: - title: - en_US: Get your API Key from BAICHUAN AI - zh_Hans: 从百川智能获取您的 API Key - url: - en_US: https://www.baichuan-ai.com -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/baichuan/llm/__init__.py b/api/core/model_runtime/model_providers/baichuan/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-53b.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-53b.yaml deleted file mode 100644 index 8360dd5faf..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-53b.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: baichuan2-53b -label: - en_US: Baichuan2-53B -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 1000 - min: 1 - max: 4000 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - default: 1 - min: 1 - max: 2 - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo-192k.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo-192k.yaml deleted file mode 100644 index 0ce0265cfe..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo-192k.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: baichuan2-turbo-192k -label: - en_US: Baichuan2-Turbo-192K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 192000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 8000 - min: 1 - max: 192000 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - default: 1 - min: 1 - max: 2 - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo.yaml deleted file mode 100644 index ccb4ee8b92..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan2-turbo.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: baichuan2-turbo -label: - en_US: Baichuan2-Turbo -model_type: llm -features: - - agent-thought - - multi-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.3 - - name: top_p - use_template: top_p - default: 0.85 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - min: 0 - max: 20 - default: 5 - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - default: 2048 - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml deleted file mode 100644 index d9cd086e82..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo-128k.yaml +++ /dev/null @@ -1,53 +0,0 @@ -model: baichuan3-turbo-128k -label: - en_US: Baichuan3-Turbo-128k -model_type: llm -features: - - agent-thought - - multi-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.3 - - name: top_p - use_template: top_p - default: 0.85 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - min: 0 - max: 20 - default: 5 - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - default: 2048 - - name: res_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. 
- required: false diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml deleted file mode 100644 index 58f9b39a43..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan3-turbo.yaml +++ /dev/null @@ -1,53 +0,0 @@ -model: baichuan3-turbo -label: - en_US: Baichuan3-Turbo -model_type: llm -features: - - agent-thought - - multi-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.3 - - name: top_p - use_template: top_p - default: 0.85 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - min: 0 - max: 20 - default: 5 - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - default: 2048 - - name: res_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml b/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml deleted file mode 100644 index 6a1135e165..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan4.yaml +++ /dev/null @@ -1,53 +0,0 @@ -model: baichuan4 -label: - en_US: Baichuan4 -model_type: llm -features: - - agent-thought - - multi-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.3 - - name: top_p - use_template: top_p - default: 0.85 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - min: 0 - max: 20 - default: 5 - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - default: 2048 - - name: res_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - name: with_search_enhance - label: - zh_Hans: 搜索增强 - en_US: Search Enhance - type: boolean - help: - zh_Hans: 允许模型自行进行外部搜索,以增强生成结果。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py deleted file mode 100644 index a7ca28d49d..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py +++ /dev/null @@ -1,21 +0,0 @@ -import re - - -class BaichuanTokenizer: - @classmethod - def count_chinese_characters(cls, text: str) -> int: - return len(re.findall(r"[\u4e00-\u9fa5]", text)) - - @classmethod - def count_english_vocabularies(cls, text: str) -> int: - # remove all non-alphanumeric characters but keep spaces and other symbols like !, ., etc. 
- text = re.sub(r"[^a-zA-Z0-9\s]", "", text) - # count the number of words not characters - return len(text.split()) - - @classmethod - def _get_num_tokens(cls, text: str) -> int: - # tokens = number of Chinese characters + number of English words * 1.3 - # (for estimation only, subject to actual return) - # https://platform.baichuan-ai.com/docs/text-Embedding - return int(cls.count_chinese_characters(text) + cls.count_english_vocabularies(text) * 1.3) diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py deleted file mode 100644 index d5fda73009..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo.py +++ /dev/null @@ -1,144 +0,0 @@ -import json -from collections.abc import Iterator -from typing import Any, Optional, Union - -from requests import post - -from core.model_runtime.entities.message_entities import PromptMessageTool -from core.model_runtime.model_providers.baichuan.llm.baichuan_turbo_errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - - -class BaichuanModel: - api_key: str - - def __init__(self, api_key: str) -> None: - self.api_key = api_key - - @property - def _model_mapping(self) -> dict: - return { - "baichuan2-turbo": "Baichuan2-Turbo", - "baichuan3-turbo": "Baichuan3-Turbo", - "baichuan3-turbo-128k": "Baichuan3-Turbo-128k", - "baichuan4": "Baichuan4", - } - - @property - def request_headers(self) -> dict[str, Any]: - return { - "Content-Type": "application/json", - "Authorization": "Bearer " + self.api_key, - } - - def _build_parameters( - self, - model: str, - stream: bool, - messages: list[dict], - parameters: dict[str, Any], - tools: Optional[list[PromptMessageTool]] = None, - ) -> dict[str, Any]: - if model in self._model_mapping: - # the LargeLanguageModel._code_block_mode_wrapper() method will remove the response_format of parameters. 
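Aside: the deleted BaichuanTokenizer above does not tokenize at all; it estimates. Per the comment and Baichuan's docs, the count is roughly one token per Chinese character plus 1.3 tokens per English word, and the API's own usage numbers remain authoritative. A minimal standalone sketch of the same heuristic:

```python
import re


def estimate_baichuan_tokens(text: str) -> int:
    """Rough estimate matching the deleted BaichuanTokenizer:
    tokens ~= Chinese characters + English words * 1.3
    (estimation only; the API returns the real count)."""
    chinese_chars = len(re.findall(r"[\u4e00-\u9fa5]", text))
    # Strip punctuation but keep whitespace, then count words, not characters.
    english_text = re.sub(r"[^a-zA-Z0-9\s]", "", text)
    english_words = len(english_text.split())
    return int(chinese_chars + english_words * 1.3)


print(estimate_baichuan_tokens("你好 world"))  # 2 chars + 1 word * 1.3 -> 3
```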
- # we need to rename it to res_format to get its value - if parameters.get("res_format") == "json_object": - parameters["response_format"] = {"type": "json_object"} - - if tools or parameters.get("with_search_enhance") is True: - parameters["tools"] = [] - - # with_search_enhance is deprecated, use web_search instead - if parameters.get("with_search_enhance") is True: - parameters["tools"].append( - { - "type": "web_search", - "web_search": {"enable": True}, - } - ) - if tools: - for tool in tools: - parameters["tools"].append( - { - "type": "function", - "function": { - "name": tool.name, - "description": tool.description, - "parameters": tool.parameters, - }, - } - ) - - # turbo api accepts flat parameters - return { - "model": self._model_mapping.get(model), - "stream": stream, - "messages": messages, - **parameters, - } - else: - raise BadRequestError(f"Unknown model: {model}") - - def generate( - self, - model: str, - stream: bool, - messages: list[dict], - parameters: dict[str, Any], - timeout: int, - tools: Optional[list[PromptMessageTool]] = None, - ) -> Union[Iterator, dict]: - if model in self._model_mapping: - api_base = "https://api.baichuan-ai.com/v1/chat/completions" - else: - raise BadRequestError(f"Unknown model: {model}") - - data = self._build_parameters(model, stream, messages, parameters, tools) - - try: - response = post( - url=api_base, - headers=self.request_headers, - data=json.dumps(data), - timeout=timeout, - stream=stream, - ) - except Exception as e: - raise InternalServerError(f"Failed to invoke model: {e}") - - if response.status_code != 200: - try: - resp = response.json() - # try to parse error message - err = resp["error"]["type"] - msg = resp["error"]["message"] - except Exception as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {response.text}") - - if err == "invalid_api_key": - raise InvalidAPIKeyError(msg) - elif err == "insufficient_quota": - raise InsufficientAccountBalanceError(msg) - elif err == "invalid_authentication": - raise InvalidAuthenticationError(msg) - elif err == "invalid_request_error": - raise BadRequestError(msg) - elif "rate" in err: - raise RateLimitReachedError(msg) - elif "internal" in err: - raise InternalServerError(msg) - elif err == "api_key_empty": - raise InvalidAPIKeyError(msg) - else: - raise InternalServerError(f"Unknown error: {err} with message: {msg}") - - if stream: - return response.iter_lines() - else: - return response.json() diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo_errors.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo_errors.py deleted file mode 100644 index 309b5cf413..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_turbo_errors.py +++ /dev/null @@ -1,22 +0,0 @@ -class InvalidAuthenticationError(Exception): - pass - - -class InvalidAPIKeyError(Exception): - pass - - -class RateLimitReachedError(Exception): - pass - - -class InsufficientAccountBalanceError(Exception): - pass - - -class InternalServerError(Exception): - pass - - -class BadRequestError(Exception): - pass diff --git a/api/core/model_runtime/model_providers/baichuan/llm/llm.py b/api/core/model_runtime/model_providers/baichuan/llm/llm.py deleted file mode 100644 index 91a14bf100..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/llm/llm.py +++ /dev/null @@ -1,296 +0,0 @@ -import json -from collections.abc import Generator, Iterator -from typing import cast - -from core.model_runtime.entities.llm_entities 
import ( - LLMResult, - LLMResultChunk, - LLMResultChunkDelta, -) -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.baichuan.llm.baichuan_tokenizer import BaichuanTokenizer -from core.model_runtime.model_providers.baichuan.llm.baichuan_turbo import BaichuanModel -from core.model_runtime.model_providers.baichuan.llm.baichuan_turbo_errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - - -class BaichuanLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stream=stream, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - return self._num_tokens_from_messages(prompt_messages) - - def _num_tokens_from_messages( - self, - messages: list[PromptMessage], - ) -> int: - """Calculate num tokens for baichuan model""" - - def tokens(text: str): - return BaichuanTokenizer._get_num_tokens(text) - - tokens_per_message = 3 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - num_tokens += tokens(str(value)) - num_tokens += 3 - - return num_tokens - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for Baichuan - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - raise ValueError("User message content must be str") - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - message_dict["tool_calls"] = [tool_call.dict() for tool_call in message.tool_calls] - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = {"role": "tool", "content": message.content, "tool_call_id": message.tool_call_id} - else: - raise ValueError(f"Unknown message type 
{type(message)}") - - return message_dict - - def validate_credentials(self, model: str, credentials: dict) -> None: - # ping - instance = BaichuanModel(api_key=credentials["api_key"]) - - try: - instance.generate( - model=model, - stream=False, - messages=[{"content": "ping", "role": "user"}], - parameters={ - "max_tokens": 1, - }, - timeout=60, - ) - except Exception as e: - raise CredentialsValidateFailedError(f"Invalid API key: {e}") - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stream: bool = True, - ) -> LLMResult | Generator: - instance = BaichuanModel(api_key=credentials["api_key"]) - messages = [self._convert_prompt_message_to_dict(m) for m in prompt_messages] - - # invoke model - response = instance.generate( - model=model, - stream=stream, - messages=messages, - parameters=model_parameters, - timeout=60, - tools=tools, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, prompt_messages, credentials, response) - - return self._handle_chat_generate_response(model, prompt_messages, credentials, response) - - def _handle_chat_generate_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: dict, - ) -> LLMResult: - choices = response.get("choices", []) - assistant_message = AssistantPromptMessage(content="", tool_calls=[]) - if choices and choices[0]["finish_reason"] == "tool_calls": - for choice in choices: - for tool_call in choice["message"]["tool_calls"]: - tool = AssistantPromptMessage.ToolCall( - id=tool_call.get("id", ""), - type=tool_call.get("type", ""), - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_call.get("function", {}).get("name", ""), - arguments=tool_call.get("function", {}).get("arguments", ""), - ), - ) - assistant_message.tool_calls.append(tool) - else: - for choice in choices: - assistant_message.content += choice["message"]["content"] - assistant_message.role = choice["message"]["role"] - - usage = response.get("usage") - if usage: - # transform usage - prompt_tokens = usage["prompt_tokens"] - completion_tokens = usage["completion_tokens"] - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(prompt_messages) - completion_tokens = self._num_tokens_from_messages([assistant_message]) - - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_message, - usage=usage, - ) - - def _handle_chat_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Iterator, - ) -> Generator: - for line in response: - if not line: - continue - line = line.decode("utf-8") - # remove the first `data: ` prefix - if line.startswith("data:"): - line = line[5:].strip() - try: - data = json.loads(line) - except Exception as e: - if line.strip() == "[DONE]": - return - choices = data.get("choices", []) - - stop_reason = "" - for choice in choices: - if choice.get("finish_reason"): - stop_reason = choice["finish_reason"] - - if len(choice["delta"]["content"]) == 0: - continue - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=choice["delta"]["content"], tool_calls=[]), - 
finish_reason=stop_reason, - ), - ) - - # if there is usage, the response is the last one, yield it and return - if "usage" in data: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=data["usage"]["prompt_tokens"], - completion_tokens=data["usage"]["completion_tokens"], - ) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content="", tool_calls=[]), - usage=usage, - finish_reason=stop_reason, - ), - ) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/__init__.py b/api/core/model_runtime/model_providers/baichuan/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/baichuan-text-embedding.yaml b/api/core/model_runtime/model_providers/baichuan/text_embedding/baichuan-text-embedding.yaml deleted file mode 100644 index 67e5fcc47c..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/text_embedding/baichuan-text-embedding.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: baichuan-text-embedding -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 16 diff --git a/api/core/model_runtime/model_providers/bedrock/__init__.py b/api/core/model_runtime/model_providers/bedrock/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/bedrock/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/bedrock/_assets/icon_l_en.svg deleted file mode 100644 index 667db50800..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/_assets/icon_l_en.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/bedrock/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/bedrock/_assets/icon_s_en.svg deleted file mode 100644 index 6a0235af92..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/_assets/icon_s_en.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/bedrock/bedrock.py b/api/core/model_runtime/model_providers/bedrock/bedrock.py deleted file mode 100644 index 1cfc1d199c..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/bedrock.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class BedrockProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate 
failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `amazon.titan-text-lite-v1` model by default for validating credentials - model_for_validation = credentials.get("model_for_validation", "amazon.titan-text-lite-v1") - model_instance.validate_credentials(model=model_for_validation, credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/bedrock/bedrock.yaml b/api/core/model_runtime/model_providers/bedrock/bedrock.yaml deleted file mode 100644 index c540ee23b3..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/bedrock.yaml +++ /dev/null @@ -1,89 +0,0 @@ -provider: bedrock -label: - en_US: AWS -description: - en_US: AWS Bedrock's models. -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FCFDFF" -help: - title: - en_US: Get your Access Key and Secret Access Key from AWS Console - url: - en_US: https://console.aws.amazon.com/ -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: aws_access_key_id - required: false - label: - en_US: Access Key (If not provided, credentials are obtained from the running environment.) - zh_Hans: Access Key - type: secret-input - placeholder: - en_US: Enter your Access Key - zh_Hans: 在此输入您的 Access Key - - variable: aws_secret_access_key - required: false - label: - en_US: Secret Access Key - zh_Hans: Secret Access Key - type: secret-input - placeholder: - en_US: Enter your Secret Access Key - zh_Hans: 在此输入您的 Secret Access Key - - variable: aws_region - required: true - label: - en_US: AWS Region - zh_Hans: AWS 地区 - type: select - default: us-east-1 - options: - - value: us-east-1 - label: - en_US: US East (N. Virginia) - zh_Hans: 美国东部 (弗吉尼亚北部) - - value: us-west-2 - label: - en_US: US West (Oregon) - zh_Hans: 美国西部 (俄勒冈州) - - value: ap-southeast-1 - label: - en_US: Asia Pacific (Singapore) - zh_Hans: 亚太地区 (新加坡) - - value: ap-northeast-1 - label: - en_US: Asia Pacific (Tokyo) - zh_Hans: 亚太地区 (东京) - - value: eu-central-1 - label: - en_US: Europe (Frankfurt) - zh_Hans: 欧洲 (法兰克福) - - value: eu-west-2 - label: - en_US: Eu west London (London) - zh_Hans: 欧洲西部 (伦敦) - - value: us-gov-west-1 - label: - en_US: AWS GovCloud (US-West) - zh_Hans: AWS GovCloud (US-West) - - value: ap-southeast-2 - label: - en_US: Asia Pacific (Sydney) - zh_Hans: 亚太地区 (悉尼) - - variable: model_for_validation - required: false - label: - en_US: Available Model Name - zh_Hans: 可用模型名称 - type: text-input - placeholder: - en_US: A model you have access to (e.g. amazon.titan-text-lite-v1) for validation. 
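The deleted BedrockProvider above validates credentials by making a throwaway call against `amazon.titan-text-lite-v1`, or against the user-supplied `model_for_validation` from the schema below. A minimal sketch of that check with boto3; the one-token `converse` ping is my own simplification, not the exact request the deleted LLM class issued:

```python
import boto3
from botocore.exceptions import ClientError


def validate_bedrock_credentials(credentials: dict) -> None:
    """Fail fast if the AWS credentials cannot invoke the validation model."""
    client = boto3.client(
        "bedrock-runtime",
        aws_access_key_id=credentials.get("aws_access_key_id"),
        aws_secret_access_key=credentials.get("aws_secret_access_key"),
        region_name=credentials["aws_region"],  # required by the schema
    )
    model_id = credentials.get("model_for_validation", "amazon.titan-text-lite-v1")
    try:
        # Cheapest possible round trip: one user message, one output token.
        client.converse(
            modelId=model_id,
            messages=[{"role": "user", "content": [{"text": "ping"}]}],
            inferenceConfig={"maxTokens": 1},
        )
    except ClientError as exc:
        raise ValueError(f"Bedrock credential validation failed: {exc}") from exc
```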
- zh_Hans: 为了进行验证,请输入一个您可用的模型名称 (例如:amazon.titan-text-lite-v1) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/__init__.py b/api/core/model_runtime/model_providers/bedrock/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml b/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml deleted file mode 100644 index 86c8061dee..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml +++ /dev/null @@ -1,24 +0,0 @@ -- amazon.titan-text-express-v1 -- amazon.titan-text-lite-v1 -- anthropic.claude-instant-v1 -- anthropic.claude-v1 -- anthropic.claude-v2 -- anthropic.claude-v2:1 -- anthropic.claude-3-sonnet-v1:0 -- anthropic.claude-3-haiku-v1:0 -- cohere.command-light-text-v14 -- cohere.command-text-v14 -- cohere.command-r-plus-v1.0 -- cohere.command-r-v1.0 -- meta.llama3-1-8b-instruct-v1:0 -- meta.llama3-1-70b-instruct-v1:0 -- meta.llama3-1-405b-instruct-v1:0 -- meta.llama3-8b-instruct-v1:0 -- meta.llama3-70b-instruct-v1:0 -- meta.llama2-13b-chat-v1 -- meta.llama2-70b-chat-v1 -- mistral.mistral-large-2407-v1:0 -- mistral.mistral-small-2402-v1:0 -- mistral.mistral-large-2402-v1:0 -- mistral.mixtral-8x7b-instruct-v0:1 -- mistral.mistral-7b-instruct-v0:2 diff --git a/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-mid-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-mid-v1.yaml deleted file mode 100644 index 65dad02969..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-mid-v1.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: ai21.j2-mid-v1 -label: - en_US: J2 Mid V1 -model_type: llm -model_properties: - mode: completion - context_size: 8191 -parameter_rules: - - name: temperature - use_template: temperature - - name: topP - use_template: top_p - - name: maxTokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 - - name: count_penalty - label: - en_US: Count Penalty - required: false - type: float - default: 0 - min: 0 - max: 1 - - name: presence_penalty - label: - en_US: Presence Penalty - required: false - type: float - default: 0 - min: 0 - max: 5 - - name: frequency_penalty - label: - en_US: Frequency Penalty - required: false - type: float - default: 0 - min: 0 - max: 500 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-ultra-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-ultra-v1.yaml deleted file mode 100644 index b72f8064bd..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/ai21.j2-ultra-v1.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: ai21.j2-ultra-v1 -label: - en_US: J2 Ultra V1 -model_type: llm -model_properties: - mode: completion - context_size: 8191 -parameter_rules: - - name: temperature - use_template: temperature - - name: topP - use_template: top_p - - name: maxTokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 - - name: count_penalty - label: - en_US: Count Penalty - required: false - type: float - default: 0 - min: 0 - max: 1 - - name: presence_penalty - label: - en_US: Presence Penalty - required: false - type: float - default: 0 - min: 0 - max: 5 - - name: frequency_penalty - label: - en_US: Frequency Penalty - required: false - type: float - default: 0 - min: 0 - max: 500 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git 
a/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-express-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-express-v1.yaml deleted file mode 100644 index 543c16d5cd..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-express-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: amazon.titan-text-express-v1 -label: - en_US: Titan Text G1 - Express -model_type: llm -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: topP - use_template: top_p - - name: maxTokenCount - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 8000 -pricing: - input: '0.0008' - output: '0.0016' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-lite-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-lite-v1.yaml deleted file mode 100644 index 2c6151c239..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/amazon.titan-text-lite-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: amazon.titan-text-lite-v1 -label: - en_US: Titan Text G1 - Lite -model_type: llm -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: topP - use_template: top_p - - name: maxTokenCount - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 -pricing: - input: '0.0003' - output: '0.0004' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml deleted file mode 100644 index c2d5eb6471..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: anthropic.claude-3-haiku-20240307-v1:0 -label: - en_US: Claude 3 Haiku -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. 
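Worth noticing in these files: each rule's `name` tracks the vendor's native key while `use_template` ties it back to Dify's shared parameter templates, so AI21 exposes `maxTokens`/`topP`, Titan `maxTokenCount`/`topP`, and Anthropic `max_tokens`/`top_p` behind the same UI controls. A hedged sketch of how such a rule could translate a template value into the provider-native key (illustrative only; the real rendering lives elsewhere in the model runtime):

```python
from dataclasses import dataclass
from typing import Optional, Union


@dataclass
class ParameterRule:
    name: str                      # provider-native key sent to the API
    use_template: Optional[str]    # shared Dify template the UI exposes
    default: Union[float, int, None] = None


def render_params(rules: list, user_params: dict) -> dict:
    """Map template-keyed user input onto provider-native parameter names."""
    rendered = {}
    for rule in rules:
        source_key = rule.use_template or rule.name
        if source_key in user_params:
            rendered[rule.name] = user_params[source_key]
        elif rule.default is not None:
            rendered[rule.name] = rule.default
    return rendered


# Titan calls its token cap maxTokenCount, but users set the shared max_tokens.
rules = [ParameterRule(name="maxTokenCount", use_template="max_tokens", default=2048)]
print(render_params(rules, {"max_tokens": 512}))  # {'maxTokenCount': 512}
```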
- - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. - - name: response_format - use_template: response_format -pricing: - input: '0.00025' - output: '0.00125' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-opus-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-opus-v1.yaml deleted file mode 100644 index f90fa04266..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-opus-v1.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: anthropic.claude-3-opus-20240229-v1:0 -label: - en_US: Claude 3 Opus -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
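The help text in these Claude files repeats Anthropic's guidance: tune either temperature or top_p, never both. A small guard in that spirit, shown below; this is my own illustrative helper, not code from the deleted files:

```python
def resolve_sampling(params: dict, default_top_p: float = 0.999) -> dict:
    """Honor 'alter either temperature or top_p, but not both': if the caller
    set temperature explicitly, push top_p back to its documented default."""
    resolved = dict(params)
    if "temperature" in resolved and "top_p" in resolved:
        resolved["top_p"] = default_top_p  # keep only the explicit temperature
    return resolved


print(resolve_sampling({"temperature": 0.2, "top_p": 0.5}))
# {'temperature': 0.2, 'top_p': 0.999}
```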
- - name: response_format - use_template: response_format -pricing: - input: '0.015' - output: '0.075' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.5.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.5.yaml deleted file mode 100644 index dad0d6b6b6..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.5.yaml +++ /dev/null @@ -1,60 +0,0 @@ -model: anthropic.claude-3-5-sonnet-20240620-v1:0 -label: - en_US: Claude 3.5 Sonnet -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. - - name: response_format - use_template: response_format -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml deleted file mode 100644 index 962def8011..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml +++ /dev/null @@ -1,60 +0,0 @@ -model: anthropic.claude-3-sonnet-20240229-v1:0 -label: - en_US: Claude 3 Sonnet -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. 
Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. - - name: response_format - use_template: response_format -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-instant-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-instant-v1.yaml deleted file mode 100644 index 8422f079c5..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-instant-v1.yaml +++ /dev/null @@ -1,52 +0,0 @@ -model: anthropic.claude-instant-v1 -label: - en_US: Claude Instant 1 -model_type: llm -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
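Reading the pricing blocks in these files (including the one just below): `unit: '0.001'` means the listed prices are per 1,000 tokens, while the Cohere entries with `unit: '0.000001'` are per million. Under that reading, a quick cost estimate looks like this; the field semantics are inferred from the YAML, not from runtime code:

```python
from decimal import Decimal


def estimate_cost(prompt_tokens: int, completion_tokens: int,
                  input_price: str, output_price: str, unit: str) -> Decimal:
    """Cost = tokens * unit * per-unit price, matching the YAML pricing fields."""
    u = Decimal(unit)
    return (Decimal(prompt_tokens) * u * Decimal(input_price)
            + Decimal(completion_tokens) * u * Decimal(output_price))


# Claude Instant on Bedrock: input '0.0008', output '0.0024', unit '0.001' (USD).
print(estimate_cost(10_000, 2_000, "0.0008", "0.0024", "0.001"))  # 0.0128
```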
-pricing: - input: '0.0008' - output: '0.0024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v1.yaml deleted file mode 100644 index 6a714b1055..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v1.yaml +++ /dev/null @@ -1,53 +0,0 @@ -model: anthropic.claude-v1 -label: - en_US: Claude 1 -model_type: llm -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.1.yaml deleted file mode 100644 index 70294e4ad3..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.1.yaml +++ /dev/null @@ -1,54 +0,0 @@ -model: anthropic.claude-v2:1 -label: - en_US: Claude 2.1 -model_type: llm -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. 
- - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. - - name: response_format - use_template: response_format -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.yaml deleted file mode 100644 index 0a8ea61b6d..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-v2.yaml +++ /dev/null @@ -1,54 +0,0 @@ -model: anthropic.claude-v2 -label: - en_US: Claude 2 -model_type: llm -model_properties: - mode: chat - context_size: 100000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
- - name: response_format - use_template: response_format -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-light-text-v14.yaml b/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-light-text-v14.yaml deleted file mode 100644 index 7450009551..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-light-text-v14.yaml +++ /dev/null @@ -1,35 +0,0 @@ -model: cohere.command-light-text-v14 -label: - en_US: Command Light Text V14 -model_type: llm -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: p - use_template: top_p - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - min: 0 - max: 500 - default: 0 - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 -pricing: - input: '0.0003' - output: '0.0006' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-plus-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-plus-v1.0.yaml deleted file mode 100644 index 3c0bb4e8d5..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-plus-v1.0.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: cohere.command-r-plus-v1:0 -label: - en_US: Command R+ -model_type: llm -features: - - tool-call - #- stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '3' - output: '15' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-v1.0.yaml deleted file mode 100644 index a34f48319f..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-r-v1.0.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: cohere.command-r-v1:0 -label: - en_US: Command R -model_type: llm -features: - - tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '0.5' - output: '1.5' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-text-v14.yaml b/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-text-v14.yaml deleted file mode 100644 index 6aea5be170..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/cohere.command-text-v14.yaml +++ /dev/null @@ -1,32 +0,0 @@ -model: cohere.command-text-v14 -label: - en_US: Command Text V14 -model_type: llm -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: p - use_template: top_p - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 -pricing: - input: '0.0015' - output: '0.0020' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml deleted file mode 100644 index 24a65ef1bb..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-haiku-v1.yaml +++ /dev/null @@ -1,59 +0,0 @@ -model: eu.anthropic.claude-3-haiku-20240307-v1:0 -label: - en_US: Claude 3 Haiku(EU.Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. 
- - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.00025' - output: '0.00125' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml deleted file mode 100644 index e3d25c7d8f..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.5.yaml +++ /dev/null @@ -1,58 +0,0 @@ -model: eu.anthropic.claude-3-5-sonnet-20240620-v1:0 -label: - en_US: Claude 3.5 Sonnet(EU.Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
-pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml deleted file mode 100644 index 9a06a4ad6d..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/eu.anthropic.claude-3-sonnet-v1.yaml +++ /dev/null @@ -1,58 +0,0 @@ -model: eu.anthropic.claude-3-sonnet-20240229-v1:0 -label: - en_US: Claude 3 Sonnet(EU.Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
-pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py deleted file mode 100644 index 77bab0c294..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ /dev/null @@ -1,903 +0,0 @@ -# standard import -import base64 -import json -import logging -import mimetypes -from collections.abc import Generator -from typing import Optional, Union, cast - -# 3rd import -import boto3 -import requests -from botocore.config import Config -from botocore.exceptions import ( - ClientError, - EndpointConnectionError, - NoRegionError, - ServiceNotInRegionError, - UnknownServiceError, -) - -# local import -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) -ANTHROPIC_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - - -{{instructions}} - -""" # noqa: E501 - - -class BedrockLargeLanguageModel(LargeLanguageModel): - # please refer to the documentation: https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html - # TODO There is invoke issue: context limit on Cohere Model, will add them after fixed. 
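The deleted BedrockLargeLanguageModel below keeps a prefix table (CONVERSE_API_ENABLED_MODEL_INFO) and routes any model whose ID matches a known prefix through boto3's Converse API, falling back to per-vendor invoke paths otherwise. A condensed sketch of that routing and the streaming call; it assumes the same credential keys as the schema above and simplifies the inference parameters and chunk handling:

```python
import boto3

# Prefix -> capability flags, mirroring the deleted CONVERSE_API_ENABLED_MODEL_INFO.
CONVERSE_PREFIXES = {
    "anthropic.claude-3": {"system": True, "tools": True},
    "meta.llama": {"system": True, "tools": False},
}


def converse_if_supported(model_id: str, credentials: dict, user_text: str):
    """Yield text deltas via converse_stream when the model supports it."""
    info = next(
        (v for k, v in CONVERSE_PREFIXES.items() if model_id.startswith(k)), None
    )
    if info is None:
        raise NotImplementedError("fall back to the model-specific invoke path")
    client = boto3.client("bedrock-runtime", region_name=credentials["aws_region"])
    # converse_stream returns an event stream; the deleted code folds its
    # contentBlockDelta / metadata events into LLMResultChunk objects.
    response = client.converse_stream(
        modelId=model_id,
        messages=[{"role": "user", "content": [{"text": user_text}]}],
        inferenceConfig={"maxTokens": 256},
    )
    for event in response["stream"]:
        if "contentBlockDelta" in event:
            yield event["contentBlockDelta"]["delta"].get("text", "")
```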
- CONVERSE_API_ENABLED_MODEL_INFO = [ - {"prefix": "anthropic.claude-v2", "support_system_prompts": True, "support_tool_use": False}, - {"prefix": "anthropic.claude-v1", "support_system_prompts": True, "support_tool_use": False}, - {"prefix": "us.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "eu.anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "anthropic.claude-3", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "meta.llama", "support_system_prompts": True, "support_tool_use": False}, - {"prefix": "mistral.mistral-7b-instruct", "support_system_prompts": False, "support_tool_use": False}, - {"prefix": "mistral.mixtral-8x7b-instruct", "support_system_prompts": False, "support_tool_use": False}, - {"prefix": "mistral.mistral-large", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "mistral.mistral-small", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "cohere.command-r", "support_system_prompts": True, "support_tool_use": True}, - {"prefix": "amazon.titan", "support_system_prompts": False, "support_tool_use": False}, - ] - - @staticmethod - def _find_model_info(model_id): - for model in BedrockLargeLanguageModel.CONVERSE_API_ENABLED_MODEL_INFO: - if model_id.startswith(model["prefix"]): - return model - logger.info(f"current model id: {model_id} did not support by Converse API") - return None - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: list[Callback] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - if model_parameters.get("response_format"): - stop = stop or [] - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - response_format = model_parameters.pop("response_format") - format_prompt = SystemPromptMessage( - content=ANTHROPIC_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - prompt_messages[0] = format_prompt - else: - prompt_messages.insert(0, format_prompt) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - return self._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - model_info = BedrockLargeLanguageModel._find_model_info(model) - if model_info: - model_info["model"] = model - # invoke models via boto3 converse API - return 
self._generate_with_converse( - model_info, credentials, prompt_messages, model_parameters, stop, stream, user, tools - ) - # invoke other models via boto3 client - return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user) - - def _generate_with_converse( - self, - model_info: dict, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - tools: Optional[list[PromptMessageTool]] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model with converse API - - :param model_info: model information - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :param tools: tools for tool calling - :return: full response or stream response chunk generator result - """ - bedrock_client = boto3.client( - service_name="bedrock-runtime", - aws_access_key_id=credentials.get("aws_access_key_id"), - aws_secret_access_key=credentials.get("aws_secret_access_key"), - region_name=credentials["aws_region"], - ) - - system, prompt_message_dicts = self._convert_converse_prompt_messages(prompt_messages) - inference_config, additional_model_fields = self._convert_converse_api_model_parameters(model_parameters, stop) - - parameters = { - "modelId": model_info["model"], - "messages": prompt_message_dicts, - "inferenceConfig": inference_config, - "additionalModelRequestFields": additional_model_fields, - } - - if model_info["support_system_prompts"] and system and len(system) > 0: - parameters["system"] = system - - if model_info["support_tool_use"] and tools: - parameters["toolConfig"] = self._convert_converse_tool_config(tools=tools) - try: - if stream: - response = bedrock_client.converse_stream(**parameters) - return self._handle_converse_stream_response( - model_info["model"], credentials, response, prompt_messages - ) - else: - response = bedrock_client.converse(**parameters) - return self._handle_converse_response(model_info["model"], credentials, response, prompt_messages) - except ClientError as ex: - error_code = ex.response["Error"]["Code"] - full_error_msg = f"{error_code}: {ex.response['Error']['Message']}" - raise self._map_client_to_invoke_error(error_code, full_error_msg) - except (EndpointConnectionError, NoRegionError, ServiceNotInRegionError) as ex: - raise InvokeConnectionError(str(ex)) - - except UnknownServiceError as ex: - raise InvokeServerUnavailableError(str(ex)) - - except Exception as ex: - raise InvokeError(str(ex)) - - def _handle_converse_response( - self, model: str, credentials: dict, response: dict, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - response_content = response["output"]["message"]["content"] - # transform assistant message to prompt message - if response["stopReason"] == "tool_use": - tool_calls = [] - text, tool_use = self._extract_tool_use(response_content) - - tool_call = AssistantPromptMessage.ToolCall( - id=tool_use["toolUseId"], - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_use["name"], arguments=json.dumps(tool_use["input"]) - ), - ) - tool_calls.append(tool_call) - - assistant_prompt_message = AssistantPromptMessage(content=text, tool_calls=tool_calls) - else: - assistant_prompt_message = AssistantPromptMessage(content=response_content[0]["text"]) - - # calculate num tokens - if response["usage"]: - # transform usage - prompt_tokens = response["usage"]["inputTokens"] - completion_tokens = response["usage"]["outputTokens"] - else: - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - return result - - def _extract_tool_use(self, content: list[dict]) -> tuple[str, dict]: - tool_use = {} - text = "" - for item in content: - if "toolUse" in item: - tool_use = item["toolUse"] - elif "text" in item: - text = item["text"] - else: - raise ValueError(f"Got unknown item: {item}") - return text, tool_use - - def _handle_converse_stream_response( - self, - model: str, - credentials: dict, - response: dict, - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: full response or stream response chunk generator result - """ - - try: - full_assistant_content = "" - return_model = None - input_tokens = 0 - output_tokens = 0 - finish_reason = None - index = 0 - tool_calls: list[AssistantPromptMessage.ToolCall] = [] - tool_use = {} - - for chunk in response["stream"]: - if "messageStart" in chunk: - return_model = model - elif "messageStop" in chunk: - finish_reason = chunk["messageStop"]["stopReason"] - elif "contentBlockStart" in chunk: - tool = chunk["contentBlockStart"]["start"]["toolUse"] - tool_use["toolUseId"] = tool["toolUseId"] - tool_use["name"] = tool["name"] - elif "metadata" in chunk: - input_tokens = chunk["metadata"]["usage"]["inputTokens"] - output_tokens = chunk["metadata"]["usage"]["outputTokens"] - usage = self._calc_response_usage(model, credentials, input_tokens, output_tokens) - yield LLMResultChunk( - model=return_model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=AssistantPromptMessage(content="", tool_calls=tool_calls), - finish_reason=finish_reason, - usage=usage, - ), - ) - elif "contentBlockDelta" in chunk: - delta = chunk["contentBlockDelta"]["delta"] - if "text" in delta: - chunk_text = delta["text"] or "" - full_assistant_content += chunk_text - assistant_prompt_message = AssistantPromptMessage( - content=chunk_text or "", - ) - index = chunk["contentBlockDelta"]["contentBlockIndex"] - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index + 1, - message=assistant_prompt_message, - ), - ) - elif "toolUse" in delta: - if "input" not in tool_use: - tool_use["input"] = "" - tool_use["input"] += delta["toolUse"]["input"] - elif "contentBlockStop" in chunk: - if "input" in tool_use: - tool_call = AssistantPromptMessage.ToolCall( - id=tool_use["toolUseId"], - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_use["name"], arguments=tool_use["input"] - ), - ) - tool_calls.append(tool_call) - tool_use = {} - - except Exception as ex: - raise InvokeError(str(ex)) - - def
_convert_converse_api_model_parameters( - self, model_parameters: dict, stop: Optional[list[str]] = None - ) -> tuple[dict, dict]: - inference_config = {} - additional_model_fields = {} - if "max_tokens" in model_parameters: - inference_config["maxTokens"] = model_parameters["max_tokens"] - - if "temperature" in model_parameters: - inference_config["temperature"] = model_parameters["temperature"] - - if "top_p" in model_parameters: - inference_config["topP"] = model_parameters["top_p"] - - if stop: - inference_config["stopSequences"] = stop - - if "top_k" in model_parameters: - additional_model_fields["top_k"] = model_parameters["top_k"] - - return inference_config, additional_model_fields - - def _convert_converse_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[list[dict], list[dict]]: - """ - Convert prompt messages to dict list and system - """ - - system = [] - prompt_message_dicts = [] - for message in prompt_messages: - if isinstance(message, SystemPromptMessage): - message.content = message.content.strip() - system.append({"text": message.content}) - else: - prompt_message_dicts.append(self._convert_prompt_message_to_dict(message)) - - return system, prompt_message_dicts - - def _convert_converse_tool_config(self, tools: Optional[list[PromptMessageTool]] = None) -> dict: - tool_config = {} - configs = [] - if tools: - for tool in tools: - configs.append( - { - "toolSpec": { - "name": tool.name, - "description": tool.description, - "inputSchema": {"json": tool.parameters}, - } - } - ) - tool_config["tools"] = configs - return tool_config - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": [{"text": message.content}]} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - if not message_content.data.startswith("data:"): - # fetch image data from url - try: - url = message_content.data - image_content = requests.get(url).content - if "?" in url: - url = url.split("?")[0] - mime_type, _ = mimetypes.guess_type(url) - base64_data = base64.b64encode(image_content).decode("utf-8") - except Exception as ex: - raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}") - else: - data_split = message_content.data.split(";base64,") - mime_type = data_split[0].replace("data:", "") - base64_data = data_split[1] - image_content = base64.b64decode(base64_data) - - if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}: - raise ValueError( - f"Unsupported image type {mime_type}, " - f"only image/jpeg, image/png, image/gif, and image/webp are supported" - ) - - sub_message_dict = { - "image": {"format": mime_type.replace("image/", ""), "source": {"bytes": image_content}} - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - if message.tool_calls: - message_dict = { - "role": "assistant", - "content": [ - { - "toolUse": { - "toolUseId": message.tool_calls[0].id, - "name": message.tool_calls[0].function.name, - "input": json.loads(message.tool_calls[0].function.arguments), - } - } - ], - } - else: - message_dict = {"role": "assistant", "content": [{"text": message.content}]} - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = [{"text": message.content}] - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = { - "role": "user", - "content": [ - { - "toolResult": { - "toolUseId": message.tool_call_id, - "content": [{"json": {"text": message.content}}], - } - } - ], - } - else: - raise ValueError(f"Got unknown type {message}") - return message_dict - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage] | str, - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages or message string - :param tools: tools for tool calling - :return: number of tokens - """ - prefix = model.split(".")[0] - model_name = model.split(".")[1] - - if isinstance(prompt_messages, str): - prompt = prompt_messages - else: - prompt = self._convert_messages_to_prompt(prompt_messages, prefix, model_name) - - return self._get_num_tokens_by_gpt2(prompt) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - required_params = {} - if "anthropic" in model: - required_params = { - "max_tokens": 32, - } - elif "ai21" in model: - # ValidationException: Malformed input request: #/temperature: expected type: Number, - # found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, - # please reformat your input and try again.
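The ValidationException quoted above is what Bedrock returns when AI21 sampling fields are serialized as null, which is why the credential check below pins every field to an explicit value. A sketch of the difference (these request bodies are illustrative, not taken from this diff):

# Rejected: null sampling fields trigger the ValidationException quoted above.
bad_body = {"prompt": "ping", "temperature": None, "topP": None, "maxTokens": None}
# Accepted: every sampling field carries an explicit value, matching required_params below.
good_body = {"prompt": "ping", "temperature": 0.7, "topP": 0.9, "maxTokens": 32}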
- required_params = { - "temperature": 0.7, - "topP": 0.9, - "maxTokens": 32, - } - - try: - ping_message = UserPromptMessage(content="ping") - self._invoke( - model=model, - credentials=credentials, - prompt_messages=[ping_message], - model_parameters=required_params, - stream=False, - ) - - except ClientError as ex: - error_code = ex.response["Error"]["Code"] - full_error_msg = f"{error_code}: {ex.response['Error']['Message']}" - raise CredentialsValidateFailedError(str(self._map_client_to_invoke_error(error_code, full_error_msg))) - - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _convert_one_message_to_text( - self, message: PromptMessage, model_prefix: str, model_name: Optional[str] = None - ) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. - """ - human_prompt_prefix = "" - human_prompt_postfix = "" - ai_prompt = "" - - content = message.content - - if isinstance(message, UserPromptMessage): - body = content - if isinstance(content, list): - body = "".join([c.data for c in content if c.type == PromptMessageContentType.TEXT]) - message_text = f"{human_prompt_prefix} {body} {human_prompt_postfix}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = content - elif isinstance(message, ToolPromptMessage): - message_text = f"{human_prompt_prefix} {message.content}" - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _convert_messages_to_prompt( - self, messages: list[PromptMessage], model_prefix: str, model_name: Optional[str] = None - ) -> str: - """ - Format a list of messages into a full prompt for the Anthropic, Amazon and Llama models - - :param messages: List of PromptMessage to combine. - :param model_name: specific model name. Optional; only used to distinguish llama2 and llama3 - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - if not messages: - return "" - - messages = messages.copy() # don't mutate the original list - if not isinstance(messages[-1], AssistantPromptMessage): - messages.append(AssistantPromptMessage(content="")) - - text = "".join(self._convert_one_message_to_text(message, model_prefix, model_name) for message in messages) - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() - - def _create_payload( - self, - model: str, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - ): - """ - Create payload for bedrock api call depending on model provider - """ - payload = {} - model_prefix = model.split(".")[0] - model_name = model.split(".")[1] - - if model_prefix == "ai21": - payload["temperature"] = model_parameters.get("temperature") - payload["topP"] = model_parameters.get("topP") - payload["maxTokens"] = model_parameters.get("maxTokens") - payload["prompt"] = self._convert_messages_to_prompt(prompt_messages, model_prefix) - - # AI21 penalty parameters are objects with a "scale" field - if model_parameters.get("presencePenalty"): - payload["presencePenalty"] = {"scale": model_parameters.get("presencePenalty")} - if model_parameters.get("frequencyPenalty"): - payload["frequencyPenalty"] = {"scale": model_parameters.get("frequencyPenalty")} - if model_parameters.get("countPenalty"): - payload["countPenalty"] = {"scale": model_parameters.get("countPenalty")} - - elif model_prefix == "cohere": - payload = {**model_parameters} - payload["prompt"] = prompt_messages[0].content - payload["stream"] = stream - - else: - raise ValueError(f"Got unknown model prefix {model_prefix}") - - return payload - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - client_config = Config(region_name=credentials["aws_region"]) - - runtime_client = boto3.client( - service_name="bedrock-runtime", - config=client_config, - aws_access_key_id=credentials.get("aws_access_key_id"), - aws_secret_access_key=credentials.get("aws_secret_access_key"), - ) - - model_prefix = model.split(".")[0] - payload = self._create_payload(model, prompt_messages, model_parameters, stop, stream) - - # need workaround for ai21 models which don't support streaming - if stream and model_prefix != "ai21": - invoke = runtime_client.invoke_model_with_response_stream - else: - invoke = runtime_client.invoke_model - - try: - body_jsonstr = json.dumps(payload) - response = invoke(modelId=model, contentType="application/json", accept="*/*", body=body_jsonstr) - except ClientError as ex: - error_code = ex.response["Error"]["Code"] - full_error_msg = f"{error_code}: {ex.response['Error']['Message']}" - raise self._map_client_to_invoke_error(error_code, full_error_msg) - - except (EndpointConnectionError, NoRegionError, ServiceNotInRegionError) as ex: - raise InvokeConnectionError(str(ex)) - - except UnknownServiceError as ex: - raise InvokeServerUnavailableError(str(ex)) - - except Exception as ex: - raise InvokeError(str(ex)) - - if stream: - return
self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: dict, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - response_body = json.loads(response.get("body").read().decode("utf-8")) - - finish_reason = response_body.get("error") - - if finish_reason is not None: - raise InvokeError(finish_reason) - - # get output text and calculate num tokens based on model / provider - model_prefix = model.split(".")[0] - - if model_prefix == "ai21": - output = response_body.get("completions")[0].get("data").get("text") - prompt_tokens = len(response_body.get("prompt").get("tokens")) - completion_tokens = len(response_body.get("completions")[0].get("data").get("tokens")) - - elif model_prefix == "cohere": - output = response_body.get("generations")[0].get("text") - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, output or "") - - else: - raise ValueError(f"Got unknown model prefix {model_prefix} when handling block response") - - # construct assistant message from output - assistant_prompt_message = AssistantPromptMessage(content=output) - - # calculate usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # construct response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: dict, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - model_prefix = model.split(".")[0] - if model_prefix == "ai21": - response_body = json.loads(response.get("body").read().decode("utf-8")) - - content = response_body.get("completions")[0].get("data").get("text") - finish_reason = response_body.get("completions")[0].get("finish_reason") - - prompt_tokens = len(response_body.get("prompt").get("tokens")) - completion_tokens = len(response_body.get("completions")[0].get("data").get("tokens")) - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, message=AssistantPromptMessage(content=content), finish_reason=finish_reason, usage=usage - ), - ) - return - - stream = response.get("body") - if not stream: - raise InvokeError("No response body") - - index = -1 - for event in stream: - chunk = event.get("chunk") - - if not chunk: - exception_name = next(iter(event)) - full_ex_msg = f"{exception_name}: {event[exception_name]['message']}" - raise self._map_client_to_invoke_error(exception_name, full_ex_msg) - - payload = json.loads(chunk.get("bytes").decode()) - - model_prefix = model.split(".")[0] - if model_prefix == "cohere": - content_delta = payload.get("text") - finish_reason = payload.get("finish_reason") - - else: - raise ValueError(f"Got unknown model 
prefix {model_prefix} when handling stream response") - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=content_delta or "", - ) - index += 1 - - if not finish_reason: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message), - ) - - else: - # get num tokens from metrics in last chunk - prompt_tokens = payload["amazon-bedrock-invocationMetrics"]["inputTokenCount"] - completion_tokens = payload["amazon-bedrock-invocationMetrics"]["outputTokenCount"] - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, message=assistant_prompt_message, finish_reason=finish_reason, usage=usage - ), - ) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [], - InvokeRateLimitError: [], - InvokeAuthorizationError: [], - InvokeBadRequestError: [], - } - - def _map_client_to_invoke_error(self, error_code: str, error_msg: str) -> InvokeError: - """ - Map client error to invoke error - - :param error_code: error code - :param error_msg: error message - :return: invoke error - """ - - if error_code == "AccessDeniedException": - return InvokeAuthorizationError(error_msg) - elif error_code in {"ResourceNotFoundException", "ValidationException"}: - return InvokeBadRequestError(error_msg) - elif error_code in {"ThrottlingException", "ServiceQuotaExceededException"}: - return InvokeRateLimitError(error_msg) - elif error_code in { - "ModelTimeoutException", - "ModelErrorException", - "InternalServerException", - "ModelNotReadyException", - }: - return InvokeServerUnavailableError(error_msg) - elif error_code == "ModelStreamErrorException": - return InvokeConnectionError(error_msg) - - return InvokeError(error_msg) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-13b-chat-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-13b-chat-v1.yaml deleted file mode 100644 index a8d3704c15..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-13b-chat-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta.llama2-13b-chat-v1 -label: - en_US: Llama 2 Chat 13B -model_type: llm -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_gen_len - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 -pricing: - input: '0.00075' - output: '0.00100' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-70b-chat-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-70b-chat-v1.yaml deleted file mode 100644 index 77525e630b..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama2-70b-chat-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@
-model: meta.llama2-70b-chat-v1 -label: - en_US: Llama 2 Chat 70B -model_type: llm -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_gen_len - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 -pricing: - input: '0.00195' - output: '0.00256' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-405b-instruct-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-405b-instruct-v1.0.yaml deleted file mode 100644 index 401de65f89..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-405b-instruct-v1.0.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: meta.llama3-1-405b-instruct-v1:0 -label: - en_US: Llama 3.1 405B Instruct -model_type: llm -model_properties: - mode: completion - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.5 - - name: top_p - use_template: top_p - default: 0.9 - - name: max_gen_len - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: '0.00532' - output: '0.016' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-70b-instruct-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-70b-instruct-v1.0.yaml deleted file mode 100644 index 10bfa7b1d5..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-70b-instruct-v1.0.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: meta.llama3-1-70b-instruct-v1:0 -label: - en_US: Llama 3.1 Instruct 70B -model_type: llm -model_properties: - mode: completion - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.5 - - name: top_p - use_template: top_p - default: 0.9 - - name: max_gen_len - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: '0.00265' - output: '0.0035' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-8b-instruct-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-8b-instruct-v1.0.yaml deleted file mode 100644 index 81cd53243f..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-1-8b-instruct-v1.0.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: meta.llama3-1-8b-instruct-v1:0 -label: - en_US: Llama 3.1 Instruct 8B -model_type: llm -model_properties: - mode: completion - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.5 - - name: top_p - use_template: top_p - default: 0.9 - - name: max_gen_len - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: '0.0003' - output: '0.0006' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-70b-instruct-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-70b-instruct-v1.yaml deleted file mode 100644 index 204662690e..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-70b-instruct-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta.llama3-70b-instruct-v1:0 -label: - en_US: Llama 3 Instruct 70B -model_type: llm -model_properties: - mode: completion - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: 
top_p - use_template: top_p - - name: max_gen_len - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: '0.00265' - output: '0.0035' - unit: '0.00001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-8b-instruct-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-8b-instruct-v1.yaml deleted file mode 100644 index dd4f666a5f..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/meta.llama3-8b-instruct-v1.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta.llama3-8b-instruct-v1:0 -label: - en_US: Llama 3 Instruct 8B -model_type: llm -model_properties: - mode: completion - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_gen_len - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: '0.0004' - output: '0.0006' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-7b-instruct-v0.2.yaml b/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-7b-instruct-v0.2.yaml deleted file mode 100644 index 175c14da37..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-7b-instruct-v0.2.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: mistral.mistral-7b-instruct-v0:2 -label: - en_US: Mistral 7B Instruct -model_type: llm -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - required: false - default: 0.5 - - name: top_p - use_template: top_p - required: false - default: 0.9 - - name: top_k - use_template: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 50 - max: 200 - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.00015' - output: '0.0002' - unit: '0.00001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2402-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2402-v1.0.yaml deleted file mode 100644 index 65eed5926b..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2402-v1.0.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: mistral.mistral-large-2402-v1:0 -label: - en_US: Mistral Large -model_type: llm -features: - - tool-call - - agent-thought -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - required: false - default: 0.7 - - name: top_p - use_template: top_p - required: false - default: 1 - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 4096 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2407-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2407-v1.0.yaml deleted file mode 100644 index 19d7843a57..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-large-2407-v1.0.yaml +++ /dev/null @@ -1,29 +0,0 @@ -model: mistral.mistral-large-2407-v1:0 -label: - en_US: Mistral Large 2 (24.07) -model_type: llm -features: - - tool-call -model_properties: - mode: completion - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - required: false - default: 0.7 - - name: top_p - use_template: top_p - required: false - default: 1 - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.003' - output: '0.009' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-small-2402-v1.0.yaml b/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-small-2402-v1.0.yaml deleted file mode 100644 index b97c2a9493..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mistral-small-2402-v1.0.yaml +++ /dev/null @@ -1,29 +0,0 @@ -model: mistral.mistral-small-2402-v1:0 -label: - en_US: Mistral Small -model_type: llm -features: - - tool-call -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - required: false - default: 0.7 - - name: top_p - use_template: top_p - required: false - default: 1 - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 4096 -pricing: - input: '0.001' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mixtral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/mistral.mixtral-8x7b-instruct-v0.1.yaml deleted file mode 100644 index 03ec7eddaf..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/mistral.mixtral-8x7b-instruct-v0.1.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: mistral.mixtral-8x7b-instruct-v0:1 -label: - en_US: Mixtral 8X7B Instruct -model_type: llm -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - required: false - 
default: 0.5 - - name: top_p - use_template: top_p - required: false - default: 0.9 - - name: top_k - use_template: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 50 - max: 200 - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.00045' - output: '0.0007' - unit: '0.00001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml deleted file mode 100644 index 9247f46974..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-haiku-v1.yaml +++ /dev/null @@ -1,59 +0,0 @@ -model: us.anthropic.claude-3-haiku-20240307-v1:0 -label: - en_US: Claude 3 Haiku (US Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip: the AWS docs have an error here; the max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
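Values chosen under parameter_rules like these reach the model as the model_parameters dict, which _convert_converse_api_model_parameters in the deleted llm.py above splits between the Converse API's first-class inferenceConfig fields and the provider-specific additionalModelRequestFields passthrough. Roughly, with example values:

model_parameters = {"max_tokens": 4096, "temperature": 1.0, "top_p": 0.999, "top_k": 250}
# becomes:
inference_config = {"maxTokens": 4096, "temperature": 1.0, "topP": 0.999}  # first-class Converse fields
additional_model_fields = {"top_k": 250}  # passed through to the model unchanged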
-pricing: - input: '0.00025' - output: '0.00125' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml deleted file mode 100644 index f9854d51f0..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-opus-v1.yaml +++ /dev/null @@ -1,59 +0,0 @@ -model: us.anthropic.claude-3-opus-20240229-v1:0 -label: - en_US: Claude 3 Opus (US Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip: the AWS docs have an error here; the max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
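Each pricing block in these files pairs per-token prices with a unit multiplier; assuming the convention used by this model runtime, unit: '0.001' means the quoted prices are per 1,000 tokens. A sketch of the resulting cost arithmetic, using the Claude 3 Opus figures quoted below:

from decimal import Decimal

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> Decimal:
    # input '0.015' and output '0.075' USD at unit '0.001' (i.e. per 1,000 tokens)
    unit = Decimal("0.001")
    return (Decimal(prompt_tokens) * Decimal("0.015") + Decimal(completion_tokens) * Decimal("0.075")) * unit

# estimate_cost(1_000_000, 0) -> 15 USD per million input tokens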
-pricing: - input: '0.015' - output: '0.075' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml deleted file mode 100644 index fbcab2d5f3..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.5.yaml +++ /dev/null @@ -1,58 +0,0 @@ -model: us.anthropic.claude-3-5-sonnet-20240620-v1:0 -label: - en_US: Claude 3.5 Sonnet (US Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip: the AWS docs have an error here; the max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml deleted file mode 100644 index 9f5a1501f0..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/llm/us.anthropic.claude-3-sonnet-v1.yaml +++ /dev/null @@ -1,58 +0,0 @@ -model: us.anthropic.claude-3-sonnet-20240229-v1:0 -label: - en_US: Claude 3 Sonnet (US Cross Region Inference) -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip: the AWS docs have an error here; the max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/__init__.py b/api/core/model_runtime/model_providers/bedrock/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/_position.yaml b/api/core/model_runtime/model_providers/bedrock/text_embedding/_position.yaml deleted file mode 100644 index afbea06a3e..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/_position.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- amazon.titan-embed-text-v1 -- amazon.titan-embed-text-v2:0 -- cohere.embed-english-v3 -- cohere.embed-multilingual-v3 diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v1.yaml b/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v1.yaml deleted file mode 100644 index e5a55971a1..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v1.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: amazon.titan-embed-text-v1 -model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: '0.0001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v2.yaml b/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v2.yaml deleted file mode 100644 index 5069efeb10..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/amazon.titan-embed-text-v2.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: amazon.titan-embed-text-v2:0 -model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: '0.00002' - unit: '0.00001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-english-v3.yaml b/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-english-v3.yaml deleted file mode 100644 index d49aa2a99c..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-english-v3.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: cohere.embed-english-v3 -model_type: text-embedding -model_properties: - context_size:
512 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-multilingual-v3.yaml b/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-multilingual-v3.yaml deleted file mode 100644 index 63bab59d2c..0000000000 --- a/api/core/model_runtime/model_providers/bedrock/text_embedding/cohere.embed-multilingual-v3.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: cohere.embed-multilingual-v3 -model_type: text-embedding -model_properties: - context_size: 512 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/chatglm/__init__.py b/api/core/model_runtime/model_providers/chatglm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/chatglm/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/chatglm/_assets/icon_l_en.svg deleted file mode 100644 index a824d43d6f..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/_assets/icon_l_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/chatglm/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/chatglm/_assets/icon_s_en.svg deleted file mode 100644 index 466b4fce57..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/_assets/icon_s_en.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/chatglm/chatglm.py b/api/core/model_runtime/model_providers/chatglm/chatglm.py deleted file mode 100644 index 71d9a15322..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/chatglm.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class ChatGLMProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `chatglm3-6b` model for validate, - model_instance.validate_credentials(model="chatglm3-6b", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/chatglm/chatglm.yaml b/api/core/model_runtime/model_providers/chatglm/chatglm.yaml deleted file mode 100644 index 0c1688c350..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/chatglm.yaml +++ /dev/null @@ -1,28 +0,0 @@ -provider: chatglm -label: - en_US: ChatGLM -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#F4F7FF" -help: - title: - en_US: Deploy ChatGLM to your local - zh_Hans: 部署您的本地 ChatGLM - url: - en_US: https://github.com/THUDM/ChatGLM3 -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_base - label: - en_US: API URL - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的 API URL - en_US: Enter your API URL diff --git a/api/core/model_runtime/model_providers/chatglm/llm/__init__.py b/api/core/model_runtime/model_providers/chatglm/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b-32k.yaml b/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b-32k.yaml deleted file mode 100644 index d1075d74b5..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b-32k.yaml +++ /dev/null @@ -1,21 +0,0 @@ -model: chatglm2-6b-32k -label: - en_US: ChatGLM2-6B-32K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 2000 - min: 1 - max: 32000 diff --git a/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b.yaml b/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b.yaml deleted file mode 100644 index e3cfeb9001..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/llm/chatglm2-6b.yaml +++ /dev/null @@ -1,21 +0,0 @@ -model: chatglm2-6b -label: - en_US: ChatGLM2-6B -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 256 - min: 1 - max: 2000 diff --git a/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b-32k.yaml b/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b-32k.yaml deleted file mode 100644 index 6f34743513..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b-32k.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm3-6b-32k -label: - en_US: ChatGLM3-6B-32K -model_type: llm -features: - - tool-call - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 8000 - min: 1 - max: 32000 diff --git 
a/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b.yaml b/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b.yaml deleted file mode 100644 index d6d87e2e09..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/llm/chatglm3-6b.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm3-6b -label: - en_US: ChatGLM3-6B -model_type: llm -features: - - tool-call - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 256 - min: 1 - max: 8000 diff --git a/api/core/model_runtime/model_providers/chatglm/llm/llm.py b/api/core/model_runtime/model_providers/chatglm/llm/llm.py deleted file mode 100644 index b3eeb48e22..0000000000 --- a/api/core/model_runtime/model_providers/chatglm/llm/llm.py +++ /dev/null @@ -1,507 +0,0 @@ -import logging -from collections.abc import Generator -from typing import Optional, cast - -from httpx import Timeout -from openai import ( - APIConnectionError, - APITimeoutError, - AuthenticationError, - ConflictError, - InternalServerError, - NotFoundError, - OpenAI, - PermissionDeniedError, - RateLimitError, - Stream, - UnprocessableEntityError, -) -from openai.types.chat import ChatCompletion, ChatCompletionChunk -from openai.types.chat.chat_completion_message import FunctionCall -from yarl import URL - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.utils import helper - -logger = logging.getLogger(__name__) - - -class ChatGLMLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke model - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool 
calling - :return: - """ - return self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - prompt_messages=[ - UserPromptMessage(content="ping"), - ], - model_parameters={ - "max_tokens": 16, - }, - ) - except Exception as e: - raise CredentialsValidateFailedError(str(e)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - APIConnectionError, - APITimeoutError, - ], - InvokeServerUnavailableError: [ - InternalServerError, - ConflictError, - NotFoundError, - UnprocessableEntityError, - PermissionDeniedError, - ], - InvokeRateLimitError: [RateLimitError], - InvokeAuthorizationError: [AuthenticationError], - InvokeBadRequestError: [ValueError], - } - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - self._check_chatglm_parameters(model=model, model_parameters=model_parameters, tools=tools) - - kwargs = self._to_client_kwargs(credentials) - # init model client - client = OpenAI(**kwargs) - - extra_model_kwargs = {} - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - if tools and len(tools) > 0: - extra_model_kwargs["functions"] = [helper.dump_model(tool) for tool in tools] - - result = client.chat.completions.create( - messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response( - model=model, credentials=credentials, response=result, tools=tools, prompt_messages=prompt_messages - ) - - return self._handle_chat_generate_response( - model=model, credentials=credentials, response=result, tools=tools, prompt_messages=prompt_messages - ) - - def _check_chatglm_parameters(self, model: str, model_parameters: dict, tools: list[PromptMessageTool]) -> None: - if model.find("chatglm2") != -1 and tools is not None and len(tools) > 0: - raise InvokeBadRequestError("ChatGLM2 does not support function calling") - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for OpenAI Compatibility API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - raise ValueError("User message content must 
be str") - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls and len(message.tool_calls) > 0: - message_dict["function_call"] = { - "name": message.tool_calls[0].function.name, - "arguments": message.tool_calls[0].function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - # check if last message is user message - message = cast(ToolPromptMessage, message) - message_dict = {"role": "function", "content": message.content} - else: - raise ValueError(f"Unknown message type {type(message)}") - - return message_dict - - def _extract_response_tool_calls( - self, response_function_calls: list[FunctionCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_function_calls: - for response_tool_call in response_function_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.name, arguments=response_tool_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall(id=0, type="function", function=function) - tool_calls.append(tool_call) - - return tool_calls - - def _to_client_kwargs(self, credentials: dict) -> dict: - """ - Convert invoke kwargs to client kwargs - - :param stream: is stream response - :param model_name: model name - :param credentials: credentials dict - :param model_parameters: model parameters - :return: client kwargs - """ - client_kwargs = { - "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), - "api_key": "1", - "base_url": str(URL(credentials["api_base"]) / "v1"), - } - - return client_kwargs - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Stream[ChatCompletionChunk], - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> Generator: - full_response = "" - - for chunk in response: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""): - continue - - # check if there is a tool call in the response - function_calls = None - if delta.delta.function_call: - function_calls = [delta.delta.function_call] - - assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or []) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta.delta.content or "", tool_calls=assistant_message_tool_calls - ) - - if delta.finish_reason is not None: - # temp_assistant_prompt_message is used to calculate usage - temp_assistant_prompt_message = AssistantPromptMessage( - content=full_response, tool_calls=assistant_message_tool_calls - ) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[temp_assistant_prompt_message], tools=[]) - - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - 
system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - full_response += delta.delta.content - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: ChatCompletion, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response - """ - if len(response.choices) == 0: - raise InvokeServerUnavailableError("Empty response") - assistant_message = response.choices[0].message - - # convert function call to tool call - function_calls = assistant_message.function_call - tool_calls = self._extract_response_tool_calls([function_calls] if function_calls else []) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[assistant_prompt_message], tools=tools) - - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=response.system_fingerprint, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _num_tokens_from_string(self, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int: - """ - Calculate num tokens for text completion model with tiktoken package. - - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - num_tokens = self._get_num_tokens_by_gpt2(text) - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_from_messages( - self, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for chatglm2 and chatglm3 with GPT2 tokenizer. - - it's too complex to calculate num tokens for chatglm2 and chatglm3 with ChatGLM tokenizer, - As a temporary solution we use GPT2 tokenizer instead. 
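A rough equivalent of that fallback, assuming the `transformers` GPT-2 tokenizer stands in for `_get_num_tokens_by_gpt2`:

    from transformers import GPT2TokenizerFast
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    num_tokens = len(tokenizer.encode(text))

Counts are therefore approximations; ChatGLM's own tokenizer would produce slightly different totals.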
- - """ - - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - tokens_per_message = 3 - tokens_per_name = 1 - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - value = text - - if key == "function_call": - for t_key, t_value in value.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - else: - num_tokens += tokens(str(value)) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling - - :param encoding: encoding - :param tools: tools for tool calling - :return: number of tokens - """ - - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - num_tokens = 0 - for tool in tools: - # calculate num tokens for function object - num_tokens += tokens("name") - num_tokens += tokens(tool.name) - num_tokens += tokens("description") - num_tokens += tokens(tool.description) - parameters = tool.parameters - num_tokens += tokens("parameters") - num_tokens += tokens("type") - num_tokens += tokens(parameters.get("type")) - if "properties" in parameters: - num_tokens += tokens("properties") - for key, value in parameters.get("properties").items(): - num_tokens += tokens(key) - for field_key, field_value in value.items(): - num_tokens += tokens(field_key) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += tokens(enum_field) - else: - num_tokens += tokens(field_key) - num_tokens += tokens(str(field_value)) - if "required" in parameters: - num_tokens += tokens("required") - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += tokens(required_field) - - return num_tokens diff --git a/api/core/model_runtime/model_providers/cohere/__init__.py b/api/core/model_runtime/model_providers/cohere/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/cohere/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/cohere/_assets/icon_l_en.svg deleted file mode 100644 index 9c176896fe..0000000000 --- a/api/core/model_runtime/model_providers/cohere/_assets/icon_l_en.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/cohere/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/cohere/_assets/icon_s_en.svg deleted file mode 100644 index 28fe96d390..0000000000 --- a/api/core/model_runtime/model_providers/cohere/_assets/icon_s_en.svg +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/cohere/cohere.py b/api/core/model_runtime/model_providers/cohere/cohere.py deleted file mode 100644 index 8394a45fcf..0000000000 --- a/api/core/model_runtime/model_providers/cohere/cohere.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from 
core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class CohereProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.RERANK) - - # Use `rerank-english-v2.0` model for validate, - model_instance.validate_credentials(model="rerank-english-v2.0", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/cohere/cohere.yaml b/api/core/model_runtime/model_providers/cohere/cohere.yaml deleted file mode 100644 index bd40057fe9..0000000000 --- a/api/core/model_runtime/model_providers/cohere/cohere.yaml +++ /dev/null @@ -1,90 +0,0 @@ -provider: cohere -label: - zh_Hans: Cohere - en_US: Cohere -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#ECE9E3" -help: - title: - en_US: Get your API key from cohere - zh_Hans: 从 cohere 获取 API Key - url: - en_US: https://dashboard.cohere.com/api-keys -supported_model_types: - - llm - - text-embedding - - rerank -configurate_methods: - - predefined-model - - customizable-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - zh_Hans: API Key - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - show_on: [ ] - - variable: base_url - label: - zh_Hans: API Base - en_US: API Base - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1 - en_US: Enter your API Base, e.g. https://api.cohere.ai/v1 -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion mode - type: select - required: false - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: base_url - label: - zh_Hans: API Base - en_US: API Base - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的 API Base,如 https://api.cohere.ai/v1 - en_US: Enter your API Base, e.g. 
https://api.cohere.ai/v1 diff --git a/api/core/model_runtime/model_providers/cohere/llm/__init__.py b/api/core/model_runtime/model_providers/cohere/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/cohere/llm/_position.yaml b/api/core/model_runtime/model_providers/cohere/llm/_position.yaml deleted file mode 100644 index 42d06f49a2..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/_position.yaml +++ /dev/null @@ -1,10 +0,0 @@ -- command-r -- command-r-plus -- command-chat -- command-light-chat -- command-nightly-chat -- command-light-nightly-chat -- command -- command-light -- command-nightly -- command-light-nightly diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-chat.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-chat.yaml deleted file mode 100644 index 5f233f35ce..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: command-chat -label: - zh_Hans: command-chat - en_US: command-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 - - name: preamble_override - label: - zh_Hans: 前导文本 - en_US: Preamble - type: string - help: - zh_Hans: 当指定时,将使用提供的前导文本替换默认的 Cohere 前导文本。 - en_US: When specified, the default Cohere preamble will be replaced with the provided one. - required: false - - name: prompt_truncation - label: - zh_Hans: 提示截断 - en_US: Prompt Truncation - type: string - help: - zh_Hans: 指定如何构造 Prompt。当 prompt_truncation 设置为 "AUTO" 时,将会丢弃一些来自聊天记录的元素,以尝试构造一个符合模型上下文长度限制的 Prompt。 - en_US: Dictates how the prompt will be constructed. With prompt_truncation set to "AUTO", some elements from chat histories will be dropped in an attempt to construct a prompt that fits within the model's context length limit. - required: true - default: 'AUTO' - options: - - 'AUTO' - - 'OFF' -pricing: - input: '1.0' - output: '2.0' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-light-chat.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-light-chat.yaml deleted file mode 100644 index b5f0048770..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-light-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: command-light-chat -label: - zh_Hans: command-light-chat - en_US: command-light-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
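# A value of 0 (the default below) disables top-k filtering, so sampling draws from the full distribution.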
- required: false - default: 0 - min: 0 - max: 500 - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 - - name: preamble_override - label: - zh_Hans: 前导文本 - en_US: Preamble - type: string - help: - zh_Hans: 当指定时,将使用提供的前导文本替换默认的 Cohere 前导文本。 - en_US: When specified, the default Cohere preamble will be replaced with the provided one. - required: false - - name: prompt_truncation - label: - zh_Hans: 提示截断 - en_US: Prompt Truncation - type: string - help: - zh_Hans: 指定如何构造 Prompt。当 prompt_truncation 设置为 "AUTO" 时,将会丢弃一些来自聊天记录的元素,以尝试构造一个符合模型上下文长度限制的 Prompt。 - en_US: Dictates how the prompt will be constructed. With prompt_truncation set to "AUTO", some elements from chat histories will be dropped in an attempt to construct a prompt that fits within the model's context length limit. - required: true - default: 'AUTO' - options: - - 'AUTO' - - 'OFF' -pricing: - input: '0.3' - output: '0.6' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly-chat.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly-chat.yaml deleted file mode 100644 index 1c96b24030..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: command-light-nightly-chat -label: - zh_Hans: command-light-nightly-chat - en_US: command-light-nightly-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 - - name: preamble_override - label: - zh_Hans: 前导文本 - en_US: Preamble - type: string - help: - zh_Hans: 当指定时,将使用提供的前导文本替换默认的 Cohere 前导文本。 - en_US: When specified, the default Cohere preamble will be replaced with the provided one. - required: false - - name: prompt_truncation - label: - zh_Hans: 提示截断 - en_US: Prompt Truncation - type: string - help: - zh_Hans: 指定如何构造 Prompt。当 prompt_truncation 设置为 "AUTO" 时,将会丢弃一些来自聊天记录的元素,以尝试构造一个符合模型上下文长度限制的 Prompt。 - en_US: Dictates how the prompt will be constructed. With prompt_truncation set to "AUTO", some elements from chat histories will be dropped in an attempt to construct a prompt that fits within the model's context length limit. 
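# With prompt_truncation set to "OFF", a prompt that exceeds the context window fails instead of being trimmed.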
- required: true - default: 'AUTO' - options: - - 'AUTO' - - 'OFF' -pricing: - input: '0.3' - output: '0.6' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly.yaml deleted file mode 100644 index 4616f76689..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-light-nightly.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: command-light-nightly -label: - zh_Hans: command-light-nightly - en_US: command-light-nightly -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '0.3' - output: '0.6' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-light.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-light.yaml deleted file mode 100644 index 161756b322..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-light.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: command-light -label: - zh_Hans: command-light - en_US: command-light -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '0.3' - output: '0.6' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-nightly-chat.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-nightly-chat.yaml deleted file mode 100644 index 739e09e72e..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-nightly-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: command-nightly-chat -label: - zh_Hans: command-nightly-chat - en_US: command-nightly-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 0 - min: 0 - max: 500 - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 - - name: preamble_override - label: - zh_Hans: 前导文本 - en_US: Preamble - type: string - help: - zh_Hans: 当指定时,将使用提供的前导文本替换默认的 Cohere 前导文本。 - en_US: When specified, the default Cohere preamble will be replaced with the provided one. - required: false - - name: prompt_truncation - label: - zh_Hans: 提示截断 - en_US: Prompt Truncation - type: string - help: - zh_Hans: 指定如何构造 Prompt。当 prompt_truncation 设置为 "AUTO" 时,将会丢弃一些来自聊天记录的元素,以尝试构造一个符合模型上下文长度限制的 Prompt。 - en_US: Dictates how the prompt will be constructed. With prompt_truncation set to "AUTO", some elements from chat histories will be dropped in an attempt to construct a prompt that fits within the model's context length limit. - required: true - default: 'AUTO' - options: - - 'AUTO' - - 'OFF' -pricing: - input: '1.0' - output: '2.0' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-nightly.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-nightly.yaml deleted file mode 100644 index 1e025e40c4..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-nightly.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: command-nightly -label: - zh_Hans: command-nightly - en_US: command-nightly -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '1.0' - output: '2.0' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-r-plus.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-r-plus.yaml deleted file mode 100644 index 617e6853ea..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-r-plus.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: command-r-plus -label: - en_US: command-r-plus -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '3' - output: '15' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command-r.yaml b/api/core/model_runtime/model_providers/cohere/llm/command-r.yaml deleted file mode 100644 index c36680443b..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command-r.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: command-r -label: - en_US: command-r -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '0.5' - output: '1.5' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/command.yaml b/api/core/model_runtime/model_providers/cohere/llm/command.yaml deleted file mode 100644 index 0cac7c35ea..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/command.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: command -label: - zh_Hans: command - en_US: command -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: '1.0' - output: '2.0' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/llm/llm.py b/api/core/model_runtime/model_providers/cohere/llm/llm.py deleted file mode 100644 index 3863ad3308..0000000000 --- a/api/core/model_runtime/model_providers/cohere/llm/llm.py +++ /dev/null @@ -1,733 +0,0 @@ -import json -import logging -from collections.abc import Generator, Iterator -from typing import Optional, Union, cast - -import cohere -from cohere import ( - ChatMessage, - ChatStreamRequestToolResultsItem, - GenerateStreamedResponse, - GenerateStreamedResponse_StreamEnd, - GenerateStreamedResponse_StreamError, - GenerateStreamedResponse_TextGeneration, - Generation, - NonStreamedChatResponse, - StreamedChatResponse, - StreamedChatResponse_StreamEnd, - StreamedChatResponse_TextGeneration, - StreamedChatResponse_ToolCallsGeneration, - Tool, - ToolCall, - ToolParameterDefinitionsValue, -) -from cohere.core import RequestOptions - -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageContentType, - PromptMessageRole, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, I18nObject, ModelType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - - -class CohereLargeLanguageModel(LargeLanguageModel): - """ - Model class for Cohere large language model. 
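Chat-mode models (the `*-chat` variants) are routed through Cohere's chat API with the `-chat` suffix stripped, while completion-mode models go through the generate API; `_invoke` below dispatches on the mode reported by `get_model_mode`.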
- """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # get model mode - model_mode = self.get_model_mode(model, credentials) - - if model_mode == LLMMode.CHAT: - return self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - else: - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # get model mode - model_mode = self.get_model_mode(model) - - try: - if model_mode == LLMMode.CHAT: - return self._num_tokens_from_messages(model, credentials, prompt_messages) - else: - return self._num_tokens_from_string(model, credentials, prompt_messages[0].content) - except Exception as e: - raise self._transform_invoke_error(e) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # get model mode - model_mode = self.get_model_mode(model) - - if model_mode == LLMMode.CHAT: - self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={ - "max_tokens": 20, - "temperature": 0, - }, - stream=False, - ) - else: - self._generate( - model=model, - credentials=credentials, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={ - "max_tokens": 20, - "temperature": 0, - }, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - - if stop: - model_parameters["end_sequences"] = stop - - if stream: - response = client.generate_stream( - 
prompt=prompt_messages[0].content, - model=model, - **model_parameters, - request_options=RequestOptions(max_retries=0), - ) - - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - else: - response = client.generate( - prompt=prompt_messages[0].content, - model=model, - **model_parameters, - request_options=RequestOptions(max_retries=0), - ) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: Generation, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - assistant_text = response.generations[0].text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_text) - - # calculate num tokens - prompt_tokens = int(response.meta.billed_units.input_tokens) - completion_tokens = int(response.meta.billed_units.output_tokens) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=model, prompt_messages=prompt_messages, message=assistant_prompt_message, usage=usage - ) - - return response - - def _handle_generate_stream_response( - self, - model: str, - credentials: dict, - response: Iterator[GenerateStreamedResponse], - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator - """ - index = 1 - full_assistant_content = "" - for chunk in response: - if isinstance(chunk, GenerateStreamedResponse_TextGeneration): - chunk = cast(GenerateStreamedResponse_TextGeneration, chunk) - text = chunk.text - - if text is None: - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=text) - - full_assistant_content += text - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ), - ) - - index += 1 - elif isinstance(chunk, GenerateStreamedResponse_StreamEnd): - chunk = cast(GenerateStreamedResponse_StreamEnd, chunk) - - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(model, credentials, prompt_messages) - completion_tokens = self._num_tokens_from_messages( - model, credentials, [AssistantPromptMessage(content=full_assistant_content)] - ) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=AssistantPromptMessage(content=""), - finish_reason=chunk.finish_reason, - usage=usage, - ), - ) - break - elif isinstance(chunk, GenerateStreamedResponse_StreamError): - chunk = cast(GenerateStreamedResponse_StreamError, chunk) - raise InvokeBadRequestError(chunk.err) - - def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> 
Union[LLMResult, Generator]: - """ - Invoke llm chat model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - - if stop: - model_parameters["stop_sequences"] = stop - - if tools: - if len(tools) == 1: - raise ValueError("Cohere tool call requires at least two tools to be specified.") - - model_parameters["tools"] = self._convert_tools(tools) - - message, chat_histories, tool_results = self._convert_prompt_messages_to_message_and_chat_histories( - prompt_messages - ) - - if tool_results: - model_parameters["tool_results"] = tool_results - - # chat model - real_model = model - if self.get_model_schema(model, credentials).fetch_from == FetchFrom.PREDEFINED_MODEL: - real_model = model.removesuffix("-chat") - - if stream: - response = client.chat_stream( - message=message, - chat_history=chat_histories, - model=real_model, - **model_parameters, - request_options=RequestOptions(max_retries=0), - ) - - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages) - else: - response = client.chat( - message=message, - chat_history=chat_histories, - model=real_model, - **model_parameters, - request_options=RequestOptions(max_retries=0), - ) - - return self._handle_chat_generate_response(model, credentials, response, prompt_messages) - - def _handle_chat_generate_response( - self, model: str, credentials: dict, response: NonStreamedChatResponse, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - assistant_text = response.text - - tool_calls = [] - if response.tool_calls: - for cohere_tool_call in response.tool_calls: - tool_call = AssistantPromptMessage.ToolCall( - id=cohere_tool_call.name, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=cohere_tool_call.name, arguments=json.dumps(cohere_tool_call.parameters) - ), - ) - tool_calls.append(tool_call) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_text, tool_calls=tool_calls) - - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(model, credentials, prompt_messages) - completion_tokens = self._num_tokens_from_messages(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=model, prompt_messages=prompt_messages, message=assistant_prompt_message, usage=usage - ) - - return response - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Iterator[StreamedChatResponse], - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator - """ - - def final_response( - full_text: str, - tool_calls: 
list[AssistantPromptMessage.ToolCall], - index: int, - finish_reason: Optional[str] = None, - ) -> LLMResultChunk: - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(model, credentials, prompt_messages) - - full_assistant_prompt_message = AssistantPromptMessage(content=full_text, tool_calls=tool_calls) - completion_tokens = self._num_tokens_from_messages(model, credentials, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - return LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=AssistantPromptMessage(content="", tool_calls=tool_calls), - finish_reason=finish_reason, - usage=usage, - ), - ) - - index = 1 - full_assistant_content = "" - tool_calls = [] - for chunk in response: - if isinstance(chunk, StreamedChatResponse_TextGeneration): - chunk = cast(StreamedChatResponse_TextGeneration, chunk) - text = chunk.text - - if text is None: - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=text) - - full_assistant_content += text - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ), - ) - - index += 1 - elif isinstance(chunk, StreamedChatResponse_ToolCallsGeneration): - chunk = cast(StreamedChatResponse_ToolCallsGeneration, chunk) - if chunk.tool_calls: - for cohere_tool_call in chunk.tool_calls: - tool_call = AssistantPromptMessage.ToolCall( - id=cohere_tool_call.name, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=cohere_tool_call.name, arguments=json.dumps(cohere_tool_call.parameters) - ), - ) - tool_calls.append(tool_call) - elif isinstance(chunk, StreamedChatResponse_StreamEnd): - chunk = cast(StreamedChatResponse_StreamEnd, chunk) - yield final_response(full_assistant_content, tool_calls, index, chunk.finish_reason) - index += 1 - - def _convert_prompt_messages_to_message_and_chat_histories( - self, prompt_messages: list[PromptMessage] - ) -> tuple[str, list[ChatMessage], list[ChatStreamRequestToolResultsItem]]: - """ - Convert prompt messages to message and chat histories - :param prompt_messages: prompt messages - :return: - """ - chat_histories = [] - latest_tool_call_n_outputs = [] - for prompt_message in prompt_messages: - if prompt_message.role == PromptMessageRole.ASSISTANT: - prompt_message = cast(AssistantPromptMessage, prompt_message) - if prompt_message.tool_calls: - for tool_call in prompt_message.tool_calls: - latest_tool_call_n_outputs.append( - ChatStreamRequestToolResultsItem( - call=ToolCall( - name=tool_call.function.name, parameters=json.loads(tool_call.function.arguments) - ), - outputs=[], - ) - ) - else: - cohere_prompt_message = self._convert_prompt_message_to_dict(prompt_message) - if cohere_prompt_message: - chat_histories.append(cohere_prompt_message) - elif prompt_message.role == PromptMessageRole.TOOL: - prompt_message = cast(ToolPromptMessage, prompt_message) - if latest_tool_call_n_outputs: - i = 0 - for tool_call_n_outputs in latest_tool_call_n_outputs: - if tool_call_n_outputs.call.name == prompt_message.tool_call_id: - latest_tool_call_n_outputs[i] = ChatStreamRequestToolResultsItem( - call=ToolCall( - name=tool_call_n_outputs.call.name, parameters=tool_call_n_outputs.call.parameters - ), - outputs=[{"result": prompt_message.content}], - ) - break - i += 
1 - else: - cohere_prompt_message = self._convert_prompt_message_to_dict(prompt_message) - if cohere_prompt_message: - chat_histories.append(cohere_prompt_message) - - if latest_tool_call_n_outputs: - new_latest_tool_call_n_outputs = [] - for tool_call_n_outputs in latest_tool_call_n_outputs: - if tool_call_n_outputs.outputs: - new_latest_tool_call_n_outputs.append(tool_call_n_outputs) - - latest_tool_call_n_outputs = new_latest_tool_call_n_outputs - - # get latest message from chat histories and pop it - if len(chat_histories) > 0: - latest_message = chat_histories.pop() - message = latest_message.message - else: - raise ValueError("Prompt messages is empty") - - return message, chat_histories, latest_tool_call_n_outputs - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> Optional[ChatMessage]: - """ - Convert PromptMessage to dict for Cohere model - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - chat_message = ChatMessage(role="USER", message=message.content) - else: - sub_message_text = "" - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_text += message_content.data - - chat_message = ChatMessage(role="USER", message=sub_message_text) - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - if not message.content: - return None - chat_message = ChatMessage(role="CHATBOT", message=message.content) - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - chat_message = ChatMessage(role="USER", message=message.content) - elif isinstance(message, ToolPromptMessage): - return None - else: - raise ValueError(f"Got unknown type {message}") - - return chat_message - - def _convert_tools(self, tools: list[PromptMessageTool]) -> list[Tool]: - """ - Convert tools to Cohere model - """ - cohere_tools = [] - for tool in tools: - properties = tool.parameters["properties"] - required_properties = tool.parameters["required"] - - parameter_definitions = {} - for p_key, p_val in properties.items(): - required = False - if p_key in required_properties: - required = True - - desc = p_val["description"] - if "enum" in p_val: - desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]" - - parameter_definitions[p_key] = ToolParameterDefinitionsValue( - description=desc, type=p_val["type"], required=required - ) - - cohere_tool = Tool( - name=tool.name, description=tool.description, parameter_definitions=parameter_definitions - ) - - cohere_tools.append(cohere_tool) - - return cohere_tools - - def _num_tokens_from_string(self, model: str, credentials: dict, text: str) -> int: - """ - Calculate num tokens for text completion model. 
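Token counts come from Cohere's own `tokenize` endpoint (see below), so they reflect the model's actual tokenizer rather than a local approximation.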
- - :param model: model name - :param credentials: credentials - :param text: prompt text - :return: number of tokens - """ - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - - response = client.tokenize(text=text, model=model) - - return len(response.tokens) - - def _num_tokens_from_messages(self, model: str, credentials: dict, messages: list[PromptMessage]) -> int: - """Calculate num tokens Cohere model.""" - calc_messages = [] - for message in messages: - cohere_message = self._convert_prompt_message_to_dict(message) - if cohere_message: - calc_messages.append(cohere_message) - message_strs = [f"{message.role}: {message.message}" for message in calc_messages] - message_str = "\n".join(message_strs) - - real_model = model - if self.get_model_schema(model, credentials).fetch_from == FetchFrom.PREDEFINED_MODEL: - real_model = model.removesuffix("-chat") - - return self._num_tokens_from_string(real_model, credentials, message_str) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - Cohere supports fine-tuning of their models. This method returns the schema of the base model - but renamed to the fine-tuned model name. - - :param model: model name - :param credentials: credentials - - :return: model schema - """ - # get model schema - models = self.predefined_models() - model_map = {model.model: model for model in models} - - mode = credentials.get("mode") - - if mode == "chat": - base_model_schema = model_map["command-light-chat"] - else: - base_model_schema = model_map["command-light"] - - base_model_schema = cast(AIModelEntity, base_model_schema) - - base_model_schema_features = base_model_schema.features or [] - base_model_schema_model_properties = base_model_schema.model_properties or {} - base_model_schema_parameters_rules = base_model_schema.parameter_rules or [] - - entity = AIModelEntity( - model=model, - label=I18nObject(zh_Hans=model, en_US=model), - model_type=ModelType.LLM, - features=list(base_model_schema_features), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties=dict(base_model_schema_model_properties.items()), - parameter_rules=list(base_model_schema_parameters_rules), - pricing=base_model_schema.pricing, - ) - - return entity - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
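A minimal sketch of how the runtime consumes this mapping (assuming a transform helper of this shape, which is not shown in the patch):

    for invoke_error, sdk_errors in self._invoke_error_mapping.items():
        if isinstance(exc, tuple(sdk_errors)):
            raise invoke_error(str(exc))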
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [cohere.errors.service_unavailable_error.ServiceUnavailableError], - InvokeServerUnavailableError: [cohere.errors.internal_server_error.InternalServerError], - InvokeRateLimitError: [cohere.errors.too_many_requests_error.TooManyRequestsError], - InvokeAuthorizationError: [ - cohere.errors.unauthorized_error.UnauthorizedError, - cohere.errors.forbidden_error.ForbiddenError, - ], - InvokeBadRequestError: [ - cohere.core.api_error.ApiError, - cohere.errors.bad_request_error.BadRequestError, - cohere.errors.not_found_error.NotFoundError, - ], - } diff --git a/api/core/model_runtime/model_providers/cohere/rerank/__init__.py b/api/core/model_runtime/model_providers/cohere/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/_position.yaml b/api/core/model_runtime/model_providers/cohere/rerank/_position.yaml deleted file mode 100644 index 4dd58fc170..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/_position.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- rerank-english-v2.0 -- rerank-english-v3.0 -- rerank-multilingual-v2.0 -- rerank-multilingual-v3.0 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v2.0.yaml b/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v2.0.yaml deleted file mode 100644 index 70b4a91a0d..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v2.0.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: rerank-english-v2.0 -model_type: rerank -model_properties: - context_size: 5120 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v3.0.yaml deleted file mode 100644 index 3779f0b6c2..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank-english-v3.0.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: rerank-english-v3.0 -model_type: rerank -model_properties: - context_size: 5120 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v2.0.yaml b/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v2.0.yaml deleted file mode 100644 index c9b90387cf..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v2.0.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: rerank-multilingual-v2.0 -model_type: rerank -model_properties: - context_size: 5120 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v3.0.yaml deleted file mode 100644 index 4f6690ba76..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank-multilingual-v3.0.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: rerank-multilingual-v3.0 -model_type: rerank -model_properties: - context_size: 5120 diff --git a/api/core/model_runtime/model_providers/cohere/rerank/rerank.py b/api/core/model_runtime/model_providers/cohere/rerank/rerank.py deleted file mode 100644 index aba8fedbc0..0000000000 --- a/api/core/model_runtime/model_providers/cohere/rerank/rerank.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Optional - -import cohere -from cohere.core import RequestOptions - -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, 
- InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class CohereRerankModel(RerankModel): - """ - Model class for Cohere rerank model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=docs) - - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - response = client.rerank( - query=query, - documents=docs, - model=model, - top_n=top_n, - return_documents=True, - request_options=RequestOptions(max_retries=0), - ) - - rerank_documents = [] - for idx, result in enumerate(response.results): - # format document - rerank_document = RerankDocument( - index=result.index, - text=result.document.text, - score=result.relevance_score, - ) - - # score threshold check - if score_threshold is not None: - if result.relevance_score >= score_threshold: - rerank_documents.append(rerank_document) - else: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self.invoke( - model=model, - credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
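For example, Cohere's `TooManyRequestsError` surfaces to callers as `InvokeRateLimitError` per the mapping below.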
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [cohere.errors.service_unavailable_error.ServiceUnavailableError], - InvokeServerUnavailableError: [cohere.errors.internal_server_error.InternalServerError], - InvokeRateLimitError: [cohere.errors.too_many_requests_error.TooManyRequestsError], - InvokeAuthorizationError: [ - cohere.errors.unauthorized_error.UnauthorizedError, - cohere.errors.forbidden_error.ForbiddenError, - ], - InvokeBadRequestError: [ - cohere.core.api_error.ApiError, - cohere.errors.bad_request_error.BadRequestError, - cohere.errors.not_found_error.NotFoundError, - ], - } diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/__init__.py b/api/core/model_runtime/model_providers/cohere/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/_position.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/_position.yaml deleted file mode 100644 index 967a946f34..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/_position.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- embed-multilingual-v3.0 -- embed-multilingual-light-v3.0 -- embed-english-v3.0 -- embed-english-light-v3.0 -- embed-multilingual-v2.0 -- embed-english-v2.0 -- embed-english-light-v2.0 diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v2.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v2.0.yaml deleted file mode 100644 index 8d2aaf1737..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v2.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-english-light-v2.0 -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v3.0.yaml deleted file mode 100644 index 43b79922e3..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-light-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-english-light-v3.0 -model_type: text-embedding -model_properties: - context_size: 384 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v2.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v2.0.yaml deleted file mode 100644 index acee82b202..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v2.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-english-v2.0 -model_type: text-embedding -model_properties: - context_size: 4096 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v3.0.yaml deleted file mode 100644 index 0ad713253e..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-english-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-english-v3.0 -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git 
a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-light-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-light-v3.0.yaml deleted file mode 100644 index c253067233..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-light-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-multilingual-light-v3.0 -model_type: text-embedding -model_properties: - context_size: 384 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v2.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v2.0.yaml deleted file mode 100644 index 4dbc37d5e8..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v2.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-multilingual-v2.0 -model_type: text-embedding -model_properties: - context_size: 768 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v3.0.yaml b/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v3.0.yaml deleted file mode 100644 index ec689ada1b..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/embed-multilingual-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embed-multilingual-v3.0 -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 48 -pricing: - input: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/deepseek/__init__.py b/api/core/model_runtime/model_providers/deepseek/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg deleted file mode 100644 index 425494404f..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/_assets/icon_l_en.svg +++ /dev/null @@ -1,22 +0,0 @@ - - - Created with Pixso. - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg deleted file mode 100644 index aa854a7504..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/_assets/icon_s_en.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.py b/api/core/model_runtime/model_providers/deepseek/deepseek.py deleted file mode 100644 index 10feef8972..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/deepseek.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class DeepSeekProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
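The llm.py removed further below normalizes any user-supplied endpoint_url down to scheme and host before using it as the OpenAI-compatible base URL; a standalone sketch of that step (the function name here is illustrative):

from urllib.parse import urlparse

def normalize_openai_api_base(endpoint_url: str) -> str:
    # an empty or missing endpoint falls back to the official host;
    # otherwise only scheme://netloc is kept (any path such as /v1 is dropped)
    if not endpoint_url:
        return "https://api.deepseek.com"
    parsed = urlparse(endpoint_url)
    return f"{parsed.scheme}://{parsed.netloc}"

assert normalize_openai_api_base("https://api.deepseek.com/v1") == "https://api.deepseek.com"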
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `deepseek-chat` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="deepseek-chat", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml b/api/core/model_runtime/model_providers/deepseek/deepseek.yaml deleted file mode 100644 index 16abd358d6..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/deepseek.yaml +++ /dev/null @@ -1,41 +0,0 @@ -provider: deepseek -label: - en_US: deepseek - zh_Hans: 深度求索 -description: - en_US: Models provided by deepseek, such as deepseek-chat、deepseek-coder. - zh_Hans: 深度求索提供的模型,例如 deepseek-chat、deepseek-coder 。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#c0cdff" -help: - title: - en_US: Get your API Key from deepseek - zh_Hans: 从深度求索获取 API Key - url: - en_US: https://platform.deepseek.com/api_keys -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: endpoint_url - label: - zh_Hans: 自定义 API endpoint 地址 - en_US: Custom API endpoint URL - type: text-input - required: false - placeholder: - zh_Hans: Base URL, e.g. https://api.deepseek.com/v1 or https://api.deepseek.com - en_US: Base URL, e.g. https://api.deepseek.com/v1 or https://api.deepseek.com diff --git a/api/core/model_runtime/model_providers/deepseek/llm/__init__.py b/api/core/model_runtime/model_providers/deepseek/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml b/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml deleted file mode 100644 index 43d03f2ee9..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/llm/_position.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- deepseek-chat -- deepseek-coder diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml deleted file mode 100644 index 4973ac8ad6..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-chat.yaml +++ /dev/null @@ -1,78 +0,0 @@ -model: deepseek-chat -label: - zh_Hans: deepseek-chat - en_US: deepseek-chat -model_type: llm -features: - - agent-thought - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 1 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 4096 - min: 1 - max: 8192 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
- - name: top_p - use_template: top_p - type: float - default: 1 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. - - name: logprobs - help: - zh_Hans: 是否返回所输出 token 的对数概率。如果为 true,则在 message 的 content 中返回每个输出 token 的对数概率。 - en_US: Whether to return the log probability of the output token. If true, returns the log probability of each output token in the content of message . - type: boolean - - name: top_logprobs - type: int - default: 0 - min: 0 - max: 20 - help: - zh_Hans: 一个介于 0 到 20 之间的整数 N,指定每个输出位置返回输出概率 top N 的 token,且返回这些 token 的对数概率。指定此参数时,logprobs 必须为 true。 - en_US: An integer N between 0 and 20, specifying that each output position returns the top N tokens with output probability, and returns the logarithmic probability of these tokens. When specifying this parameter, logprobs must be true. - - name: frequency_penalty - use_template: frequency_penalty - default: 0 - min: -2.0 - max: 2.0 - help: - zh_Hans: 介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。 - en_US: A number between -2.0 and 2.0. If the value is positive, new tokens are penalized based on their frequency of occurrence in existing text, reducing the likelihood that the model will repeat the same content. - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '1' - output: '2' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml b/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml deleted file mode 100644 index caafeadadd..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/llm/deepseek-coder.yaml +++ /dev/null @@ -1,28 +0,0 @@ -model: deepseek-coder -label: - zh_Hans: deepseek-coder - en_US: deepseek-coder -model_type: llm -features: - - agent-thought - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 diff --git a/api/core/model_runtime/model_providers/deepseek/llm/llm.py b/api/core/model_runtime/model_providers/deepseek/llm/llm.py deleted file mode 100644 index 6d0a3ee262..0000000000 --- a/api/core/model_runtime/model_providers/deepseek/llm/llm.py +++ /dev/null @@ -1,116 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union -from urllib.parse import urlparse - -import tiktoken - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import ( - PromptMessage, - PromptMessageTool, -) -from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel - - -class DeepSeekLargeLanguageModel(OpenAILargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: 
Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - # refactored from openai model runtime, use cl100k_base for calculate token number - def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int: - """ - Calculate num tokens for text completion model with tiktoken package. - - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - encoding = tiktoken.get_encoding("cl100k_base") - num_tokens = len(encoding.encode(text)) - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - # refactored from openai model runtime, use cl100k_base for calculate token number - def _num_tokens_from_messages( - self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. - - Official documentation: https://github.com/openai/openai-cookbook/blob/ - main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" - encoding = tiktoken.get_encoding("cl100k_base") - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += len(encoding.encode(t_key)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(encoding.encode(f_key)) - num_tokens += len(encoding.encode(f_value)) - else: - num_tokens += len(encoding.encode(t_key)) - num_tokens += len(encoding.encode(t_value)) - else: - num_tokens += len(encoding.encode(str(value))) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - @staticmethod - def _add_custom_parameters(credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["openai_api_key"] = credentials["api_key"] - if "endpoint_url" not in credentials or credentials["endpoint_url"] == "": - credentials["openai_api_base"] = "https://api.deepseek.com" - else: - parsed_url = urlparse(credentials["endpoint_url"]) - credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}" diff --git a/api/core/model_runtime/model_providers/fireworks/__init__.py b/api/core/model_runtime/model_providers/fireworks/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff 
--git a/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg deleted file mode 100644 index 582605cc42..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/_assets/icon_l_en.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg deleted file mode 100644 index 86eeba66f9..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/_assets/icon_s_en.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/api/core/model_runtime/model_providers/fireworks/_common.py b/api/core/model_runtime/model_providers/fireworks/_common.py deleted file mode 100644 index 378ced3a40..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/_common.py +++ /dev/null @@ -1,52 +0,0 @@ -from collections.abc import Mapping - -import openai - -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonFireworks: - def _to_credential_kwargs(self, credentials: Mapping) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "api_key": credentials["fireworks_api_key"], - "base_url": "https://api.fireworks.ai/inference/v1", - "max_retries": 1, - } - - return credentials_kwargs - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], - InvokeServerUnavailableError: [openai.InternalServerError], - InvokeRateLimitError: [openai.RateLimitError], - InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], - InvokeBadRequestError: [ - openai.BadRequestError, - openai.NotFoundError, - openai.UnprocessableEntityError, - openai.APIError, - ], - } diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.py b/api/core/model_runtime/model_providers/fireworks/fireworks.py deleted file mode 100644 index 15f25badab..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/fireworks.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class FireworksProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
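The credential kwargs assembled by _CommonFireworks._to_credential_kwargs above map one-to-one onto the OpenAI-compatible client constructor; a hedged usage sketch (the key value is a placeholder, not a real credential):

from openai import OpenAI

credentials = {"fireworks_api_key": "fw-placeholder"}  # placeholder, not a real key
client = OpenAI(
    api_key=credentials["fireworks_api_key"],
    base_url="https://api.fireworks.ai/inference/v1",
    max_retries=1,
)
# the resulting client is then driven exactly as in _chat_generate further down,
# e.g. client.chat.completions.create(model=..., messages=[...], stream=True)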
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - model_instance.validate_credentials( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", credentials=credentials - ) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/fireworks/llm/__init__.py b/api/core/model_runtime/model_providers/fireworks/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml b/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml deleted file mode 100644 index 9f7c1af68c..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/_position.yaml +++ /dev/null @@ -1,16 +0,0 @@ -- llama-v3p1-405b-instruct -- llama-v3p1-70b-instruct -- llama-v3p1-8b-instruct -- llama-v3-70b-instruct -- mixtral-8x22b-instruct -- mixtral-8x7b-instruct -- firefunction-v2 -- firefunction-v1 -- gemma2-9b-it -- llama-v3-70b-instruct-hf -- llama-v3-8b-instruct -- llama-v3-8b-instruct-hf -- mixtral-8x7b-instruct-hf -- mythomax-l2-13b -- phi-3-vision-128k-instruct -- yi-large diff --git a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml deleted file mode 100644 index f6bac12832..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v1.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/firefunction-v1 -label: - zh_Hans: Firefunction V1 - en_US: Firefunction V1 -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.5' - output: '0.5' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml b/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml deleted file mode 100644 index 2979cb46d5..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/firefunction-v2.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/firefunction-v2 -label: - zh_Hans: Firefunction V2 - en_US: Firefunction V2 -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.9' - output: '0.9' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml b/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml deleted file mode 100644 index ee41a7e2fd..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/gemma2-9b-it.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: accounts/fireworks/models/gemma2-9b-it -label: - zh_Hans: Gemma2 9B Instruct - en_US: Gemma2 9B Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml deleted file mode 100644 index 2ae89b8816..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct-hf.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3-70b-instruct-hf -label: - zh_Hans: Llama3 70B Instruct(HF version) - en_US: Llama3 70B Instruct(HF version) -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.9' - output: '0.9' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml deleted file mode 100644 index 7c24b08ca5..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-70b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3-70b-instruct -label: - zh_Hans: Llama3 70B Instruct - en_US: Llama3 70B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.9' - output: '0.9' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml deleted file mode 100644 index 83507ef3e5..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct-hf.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3-8b-instruct-hf -label: - zh_Hans: Llama3 8B Instruct(HF version) - en_US: Llama3 8B Instruct(HF version) -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml deleted file mode 100644 index d8ac9537b8..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3-8b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3-8b-instruct -label: - zh_Hans: Llama3 8B Instruct - en_US: Llama3 8B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml deleted file mode 100644 index c4ddb3e924..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-405b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p1-405b-instruct -label: - zh_Hans: Llama3.1 405B Instruct - en_US: Llama3.1 405B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '3' - output: '3' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml deleted file mode 100644 index 62f84f87fa..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-70b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p1-70b-instruct -label: - zh_Hans: Llama3.1 70B Instruct - en_US: Llama3.1 70B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.9' - output: '0.9' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml deleted file mode 100644 index 9bb99c91b6..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p1-8b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p1-8b-instruct -label: - zh_Hans: Llama3.1 8B Instruct - en_US: Llama3.1 8B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llm.py b/api/core/model_runtime/model_providers/fireworks/llm/llm.py deleted file mode 100644 index 2dcf1adba6..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llm.py +++ /dev/null @@ -1,610 +0,0 @@ -import logging -from collections.abc import Generator -from typing import Optional, Union, cast - -from openai import OpenAI, Stream -from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall -from openai.types.chat.chat_completion_message import FunctionCall - -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.fireworks._common import _CommonFireworks - -logger = logging.getLogger(__name__) - -FIREWORKS_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object can be found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - -<instructions> -{{instructions}} -</instructions> - -""" # noqa: E501 - - -class FireworksLargeLanguageModel(_CommonFireworks, LargeLanguageModel): - """ - Model class for Fireworks large language model. 
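FIREWORKS_BLOCK_MODE_PROMPT above is filled in by plain string replacement inside _transform_chat_json_prompts further down; a condensed sketch of that substitution (the template literal here is abbreviated):

template = (
    "You should always follow the instructions and output a valid {{block}} object.\n"
    "<instructions>\n{{instructions}}\n</instructions>\n"
)  # abbreviated stand-in for FIREWORKS_BLOCK_MODE_PROMPT
# {{instructions}} is replaced first, then {{block}}, matching the deleted code
system_prompt = template.replace(
    "{{instructions}}", 'Reply as {"answer": "..."}'
).replace("{{block}}", "JSON")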
- """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - return self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: Optional[list[Callback]] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - if "response_format" in model_parameters and model_parameters["response_format"] in {"JSON", "XML"}: - stop = stop or [] - self._transform_chat_json_prompts( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - response_format=model_parameters["response_format"], - ) - model_parameters.pop("response_format") - - return self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _transform_chat_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts - """ - if stop is None: - stop = [] - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - prompt_messages[0] = SystemPromptMessage( - content=FIREWORKS_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}\n")) - else: - prompt_messages.insert( - 0, - SystemPromptMessage( - content=FIREWORKS_BLOCK_MODE_PROMPT.replace( - "{{instructions}}", f"Please output a valid {response_format} object." 
- ).replace("{{block}}", response_format) - ), - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - return self._num_tokens_from_messages(model, prompt_messages, tools) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - client.chat.completions.create( - messages=[{"role": "user", "content": "ping"}], model=model, temperature=0, max_tokens=10, stream=False - ) - except Exception as e: - raise CredentialsValidateFailedError(str(e)) - - def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - - if tools: - extra_model_kwargs["functions"] = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} for tool in tools - ] - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - # chat model - response = client.chat.completions.create( - messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) - return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: ChatCompletion, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response - """ - assistant_message = response.choices[0].message - # assistant_message_tool_calls = assistant_message.tool_calls - assistant_message_function_call = assistant_message.function_call - - # extract tool calls from response - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = 
self._num_tokens_from_messages(model, prompt_messages, tools) - completion_tokens = self._num_tokens_from_messages(model, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=response.model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return response - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Stream[ChatCompletionChunk], - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response chunk generator - """ - full_assistant_content = "" - delta_assistant_message_function_call_storage: Optional[ChoiceDeltaFunctionCall] = None - prompt_tokens = 0 - completion_tokens = 0 - final_tool_calls = [] - final_chunk = LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=""), - ), - ) - - for chunk in response: - if len(chunk.choices) == 0: - if chunk.usage: - # calculate num tokens - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - continue - - delta = chunk.choices[0] - has_finish_reason = delta.finish_reason is not None - - if ( - not has_finish_reason - and (delta.delta.content is None or delta.delta.content == "") - and delta.delta.function_call is None - ): - continue - - # assistant_message_tool_calls = delta.delta.tool_calls - assistant_message_function_call = delta.delta.function_call - - # extract tool calls from response - if delta_assistant_message_function_call_storage is not None: - # handle process of stream function call - if assistant_message_function_call: - # message has not ended ever - delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments - continue - else: - # message has ended - assistant_message_function_call = delta_assistant_message_function_call_storage - delta_assistant_message_function_call_storage = None - else: - if assistant_message_function_call: - # start of stream function call - delta_assistant_message_function_call_storage = assistant_message_function_call - if delta_assistant_message_function_call_storage.arguments is None: - delta_assistant_message_function_call_storage.arguments = "" - if not has_finish_reason: - continue - - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - if tool_calls: - final_tool_calls.extend(tool_calls) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls) - - full_assistant_content += delta.delta.content or "" - - if has_finish_reason: - final_chunk = LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - ), - ) - else: - yield LLMResultChunk( - 
model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - if not prompt_tokens: - prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) - - if not completion_tokens: - full_assistant_prompt_message = AssistantPromptMessage( - content=full_assistant_content, tool_calls=final_tool_calls - ) - completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - final_chunk.delta.usage = usage - - yield final_chunk - - def _extract_response_tool_calls( - self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - - return tool_calls - - def _extract_response_function_call( - self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall - ) -> AssistantPromptMessage.ToolCall: - """ - Extract function call from response - - :param response_function_call: response function call - :return: tool call - """ - tool_call = None - if response_function_call: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function_call.name, arguments=response_function_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_function_call.name, type="function", function=function - ) - - return tool_call - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for Fireworks API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - # message_dict["tool_calls"] = [tool_call.dict() for tool_call in - # message.tool_calls] - function_call = message.tool_calls[0] - message_dict["function_call"] = { - "name": function_call.function.name, - "arguments": function_call.function.arguments, - } - elif isinstance(message, 
SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - # message_dict = { - # "role": "tool", - # "content": message.content, - # "tool_call_id": message.tool_call_id - # } - message_dict = {"role": "function", "content": message.content, "name": message.tool_call_id} - else: - raise ValueError(f"Got unknown type {message}") - - if message.name: - message_dict["name"] = message.name - - return message_dict - - def _num_tokens_from_messages( - self, - model: str, - messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - credentials: dict = None, - ) -> int: - """ - Approximate num tokens with GPT2 tokenizer. - """ - - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += self._get_num_tokens_by_gpt2(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += self._get_num_tokens_by_gpt2(f_key) - num_tokens += self._get_num_tokens_by_gpt2(f_value) - else: - num_tokens += self._get_num_tokens_by_gpt2(t_key) - num_tokens += self._get_num_tokens_by_gpt2(t_value) - else: - num_tokens += self._get_num_tokens_by_gpt2(str(value)) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling with tiktoken package. 
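The estimate above follows the OpenAI cookbook accounting: a fixed 3-token framing cost per message, the encoded length of each field value, one extra token for a name field, and a final 3-token assistant priming. A condensed sketch (despite the docstring's tiktoken mention, the deleted code counts with its GPT-2 helper; the whitespace encode below stands in for that helper purely for illustration):

def encode(text: str) -> list[str]:
    return text.split()  # stand-in tokenizer; the deleted code uses a GPT-2 helper

def estimate_tokens(messages: list[dict[str, str]]) -> int:
    tokens_per_message, tokens_per_name = 3, 1  # same constants as above
    total = 0
    for message in messages:
        total += tokens_per_message
        for key, value in message.items():
            total += len(encode(str(value)))
            if key == "name":
                total += tokens_per_name
    return total + 3  # every reply is primed with the assistant role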
- - :param tools: tools for tool calling - :return: number of tokens - """ - num_tokens = 0 - for tool in tools: - num_tokens += self._get_num_tokens_by_gpt2("type") - num_tokens += self._get_num_tokens_by_gpt2("function") - num_tokens += self._get_num_tokens_by_gpt2("function") - - # calculate num tokens for function object - num_tokens += self._get_num_tokens_by_gpt2("name") - num_tokens += self._get_num_tokens_by_gpt2(tool.name) - num_tokens += self._get_num_tokens_by_gpt2("description") - num_tokens += self._get_num_tokens_by_gpt2(tool.description) - parameters = tool.parameters - num_tokens += self._get_num_tokens_by_gpt2("parameters") - if "title" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("title") - num_tokens += self._get_num_tokens_by_gpt2(parameters.get("title")) - num_tokens += self._get_num_tokens_by_gpt2("type") - num_tokens += self._get_num_tokens_by_gpt2(parameters.get("type")) - if "properties" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("properties") - for key, value in parameters.get("properties").items(): - num_tokens += self._get_num_tokens_by_gpt2(key) - for field_key, field_value in value.items(): - num_tokens += self._get_num_tokens_by_gpt2(field_key) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += self._get_num_tokens_by_gpt2(enum_field) - else: - num_tokens += self._get_num_tokens_by_gpt2(field_key) - num_tokens += self._get_num_tokens_by_gpt2(str(field_value)) - if "required" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("required") - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += self._get_num_tokens_by_gpt2(required_field) - - return num_tokens diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml deleted file mode 100644 index 87d977e26c..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x22b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/mixtral-8x22b-instruct -label: - zh_Hans: Mixtral MoE 8x22B Instruct - en_US: Mixtral MoE 8x22B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 65536 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '1.2' - output: '1.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml deleted file mode 100644 index e3d5a90858..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct-hf.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/mixtral-8x7b-instruct-hf -label: - zh_Hans: Mixtral MoE 8x7B Instruct(HF version) - en_US: Mixtral MoE 8x7B Instruct(HF version) -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.5' - output: '0.5' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml deleted file mode 100644 index 45f632ceff..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/mixtral-8x7b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/mixtral-8x7b-instruct -label: - zh_Hans: Mixtral MoE 8x7B Instruct - en_US: Mixtral MoE 8x7B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.5' - output: '0.5' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml b/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml deleted file mode 100644 index 9c3486ba10..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/mythomax-l2-13b.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/mythomax-l2-13b -label: - zh_Hans: MythoMax L2 13b - en_US: MythoMax L2 13b -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml deleted file mode 100644 index e399f2edb1..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/phi-3-vision-128k-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/phi-3-vision-128k-instruct -label: - zh_Hans: Phi3.5 Vision Instruct - en_US: Phi3.5 Vision Instruct -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml b/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml deleted file mode 100644 index bb4b6f994e..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/yi-large.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: accounts/yi-01-ai/models/yi-large -label: - zh_Hans: Yi-Large - en_US: Yi-Large -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '3' - output: '3' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fishaudio/__init__.py b/api/core/model_runtime/model_providers/fishaudio/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_l_en.svg b/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_l_en.svg deleted file mode 100644 index d6f7723bd5..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_l_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_s_en.svg b/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_s_en.svg deleted file mode 100644 index d6f7723bd5..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/_assets/fishaudio_s_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/fishaudio/fishaudio.py b/api/core/model_runtime/model_providers/fishaudio/fishaudio.py deleted file mode 100644 index 3bc4b533e0..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/fishaudio.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class FishAudioProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.TTS) - model_instance.validate_credentials(credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/fishaudio/tts/__init__.py b/api/core/model_runtime/model_providers/fishaudio/tts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/fishaudio/tts/tts.py b/api/core/model_runtime/model_providers/fishaudio/tts/tts.py deleted file mode 100644 index 895a7a914c..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/tts/tts.py +++ /dev/null @@ -1,158 +0,0 @@ -from typing import Optional - -import httpx - -from core.model_runtime.errors.invoke import InvokeBadRequestError, InvokeError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.tts_model import TTSModel - - -class FishAudioText2SpeechModel(TTSModel): - """ - Model class for Fish.audio Text to Speech model. - """ - - def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list: - api_base = credentials.get("api_base", "https://api.fish.audio") - api_key = credentials.get("api_key") - use_public_models = credentials.get("use_public_models", "false") == "true" - - params = { - "self": str(not use_public_models).lower(), - "page_size": "100", - } - - if language is not None: - if "-" in language: - language = language.split("-")[0] - params["language"] = language - - results = httpx.get( - f"{api_base}/model", - headers={"Authorization": f"Bearer {api_key}"}, - params=params, - ) - - results.raise_for_status() - data = results.json() - - return [{"name": i["title"], "value": i["_id"]} for i in data["items"]] - - def _invoke( - self, - model: str, - tenant_id: str, - credentials: dict, - content_text: str, - voice: str, - user: Optional[str] = None, - ) -> any: - """ - Invoke text2speech model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param voice: model timbre - :param content_text: text content to be translated - :param user: unique user id - :return: generator yielding audio chunks - """ - - return self._tts_invoke_streaming( - model=model, - credentials=credentials, - content_text=content_text, - voice=voice, - ) - - def validate_credentials(self, credentials: dict, user: Optional[str] = None) -> None: - """ - Validate credentials for text2speech model - - :param credentials: model credentials - :param user: unique user id - """ - - try: - self.get_tts_model_voices( - None, - credentials={ - "api_key": credentials["api_key"], - "api_base": credentials["api_base"], - # Disable public models will trigger a 403 error if user is not logged in - "use_public_models": "false", - }, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any: - """ - Invoke streaming text2speech model - :param model: model name - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: ID of the reference audio (if any) - :return: generator yielding audio chunks - """ - - try: - word_limit = self._get_model_word_limit(model, credentials) - if len(content_text) > word_limit: - 
sentences = self._split_text_into_sentences(content_text, max_length=word_limit) - else: - sentences = [content_text.strip()] - - for i in range(len(sentences)): - yield from self._tts_invoke_streaming_sentence( - credentials=credentials, content_text=sentences[i], voice=voice - ) - - except Exception as ex: - raise InvokeBadRequestError(str(ex)) - - def _tts_invoke_streaming_sentence(self, credentials: dict, content_text: str, voice: Optional[str] = None) -> any: - """ - Invoke streaming text2speech model - - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: ID of the reference audio (if any) - :return: generator yielding audio chunks - """ - api_key = credentials.get("api_key") - api_url = credentials.get("api_base", "https://api.fish.audio") - latency = credentials.get("latency") - - if not api_key: - raise InvokeBadRequestError("API key is required") - - with httpx.stream( - "POST", - api_url + "/v1/tts", - json={"text": content_text, "reference_id": voice, "latency": latency}, - headers={ - "Authorization": f"Bearer {api_key}", - }, - timeout=None, - ) as response: - if response.status_code != 200: - raise InvokeBadRequestError(f"Error: {response.status_code} - {response.text}") - yield from response.iter_bytes() - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeBadRequestError: [ - httpx.HTTPStatusError, - ], - } diff --git a/api/core/model_runtime/model_providers/fishaudio/tts/tts.yaml b/api/core/model_runtime/model_providers/fishaudio/tts/tts.yaml deleted file mode 100644 index b4a446a957..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/tts/tts.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: tts-default -model_type: tts -model_properties: - word_limit: 1000 - audio_type: 'mp3' diff --git a/api/core/model_runtime/model_providers/google/__init__.py b/api/core/model_runtime/model_providers/google/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/google/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/google/_assets/icon_l_en.svg deleted file mode 100644 index bb23bffcf1..0000000000 --- a/api/core/model_runtime/model_providers/google/_assets/icon_l_en.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/google/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/google/_assets/icon_s_en.svg deleted file mode 100644 index c5c608cd7c..0000000000 --- a/api/core/model_runtime/model_providers/google/_assets/icon_s_en.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/google/google.py b/api/core/model_runtime/model_providers/google/google.py deleted file mode 100644 index 70f56a8337..0000000000 --- a/api/core/model_runtime/model_providers/google/google.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class 
GoogleProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `gemini-pro` model for validate, - model_instance.validate_credentials(model="gemini-pro", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/google/google.yaml b/api/core/model_runtime/model_providers/google/google.yaml deleted file mode 100644 index 69d4e371c4..0000000000 --- a/api/core/model_runtime/model_providers/google/google.yaml +++ /dev/null @@ -1,31 +0,0 @@ -provider: google -label: - en_US: Google -description: - en_US: Google's Gemini model. - zh_Hans: 谷歌提供的 Gemini 模型. -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FCFDFF" -help: - title: - en_US: Get your API Key from Google - zh_Hans: 从 Google 获取 API Key - url: - en_US: https://ai.google.dev/ -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: google_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/google/llm/__init__.py b/api/core/model_runtime/model_providers/google/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml deleted file mode 100644 index 4e0209890a..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-8b-exp-0827 -label: - en_US: Gemini 1.5 Flash 8B 0827 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml deleted file mode 100644 index faabc5e4d1..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-exp-0827 -label: - en_US: Gemini 1.5 Flash 0827 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml deleted file mode 100644 index a22fcca941..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-latest -label: - en_US: Gemini 1.5 Flash Latest -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml deleted file mode 100644 index 97c68f7a18..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro-exp-0801 -label: - en_US: Gemini 1.5 Pro 0801 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml deleted file mode 100644 index 860e4816a1..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro-exp-0827 -label: - en_US: Gemini 1.5 Pro 0827 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml deleted file mode 100644 index d1bf7d269d..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro-latest -label: - en_US: Gemini 1.5 Pro Latest -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml deleted file mode 100644 index 2d213d56ad..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: gemini-pro-vision -label: - en_US: Gemini Pro Vision -model_type: llm -features: - - vision -model_properties: - mode: chat - context_size: 12288 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml deleted file mode 100644 index e2f487c1ee..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: gemini-pro -label: - en_US: Gemini Pro -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 30720 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py deleted file mode 100644 index e686ad08d9..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/llm.py +++ /dev/null @@ -1,443 +0,0 @@ -import base64 -import io -import json -import logging -from collections.abc import Generator -from typing import Optional, Union, cast - -import google.ai.generativelanguage as glm -import google.generativeai as genai -import requests -from google.api_core import exceptions -from google.generativeai.client import _ClientManager -from google.generativeai.types import ContentType, GenerateContentResponse -from google.generativeai.types.content_types import to_part -from PIL import Image - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - -GEMINI_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. 
- - -{{instructions}} - -""" # noqa: E501 - - -class GoogleLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke model - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: number of tokens - """ - prompt = self._convert_messages_to_prompt(prompt_messages) - - return self._get_num_tokens_by_gpt2(prompt) - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a full prompt for the Google model - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - return text.rstrip() - - def _convert_tools_to_glm_tool(self, tools: list[PromptMessageTool]) -> glm.Tool: - """ - Convert tool messages to glm tools - - :param tools: tool messages - :return: glm tools - """ - return glm.Tool( - function_declarations=[ - glm.FunctionDeclaration( - name=tool.name, - parameters=glm.Schema( - type=glm.Type.OBJECT, - properties={ - key: { - "type_": value.get("type", "string").upper(), - "description": value.get("description", ""), - "enum": value.get("enum", []), - } - for key, value in tool.parameters.get("properties", {}).items() - }, - required=tool.parameters.get("required", []), - ), - ) - for tool in tools - ] - ) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - - try: - ping_message = SystemPromptMessage(content="ping") - self._generate(model, credentials, [ping_message], {"max_tokens_to_sample": 5}) - - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique 
user id - :return: full response or stream response chunk generator result - """ - config_kwargs = model_parameters.copy() - config_kwargs["max_output_tokens"] = config_kwargs.pop("max_tokens_to_sample", None) - - if stop: - config_kwargs["stop_sequences"] = stop - - google_model = genai.GenerativeModel(model_name=model) - - history = [] - - # hack for gemini-pro-vision, which currently does not support multi-turn chat - if model == "gemini-pro-vision": - last_msg = prompt_messages[-1] - content = self._format_message_to_glm_content(last_msg) - history.append(content) - else: - for msg in prompt_messages: # makes message roles strictly alternating - content = self._format_message_to_glm_content(msg) - if history and history[-1]["role"] == content["role"]: - history[-1]["parts"].extend(content["parts"]) - else: - history.append(content) - - # Create a new ClientManager with tenant's API key - new_client_manager = _ClientManager() - new_client_manager.configure(api_key=credentials["google_api_key"]) - new_custom_client = new_client_manager.make_client("generative") - - google_model._client = new_custom_client - - response = google_model.generate_content( - contents=history, - generation_config=genai.types.GenerationConfig(**config_kwargs), - stream=stream, - tools=self._convert_tools_to_glm_tool(tools) if tools else None, - request_options={"timeout": 600}, - ) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: GenerateContentResponse, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=response.text) - - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: GenerateContentResponse, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - index = -1 - for chunk in response: - for part in chunk.parts: - assistant_prompt_message = AssistantPromptMessage(content="") - - if part.text: - assistant_prompt_message.content += part.text - - if part.function_call: - assistant_prompt_message.tool_calls = [ - AssistantPromptMessage.ToolCall( - id=part.function_call.name, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=part.function_call.name, - arguments=json.dumps(dict(part.function_call.args.items())), - ), - ) - ] - - index += 1 - - if not response._done: - # transform assistant message to prompt message - yield 
LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message), - ) - else: - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - finish_reason=str(chunk.candidates[0].finish_reason), - usage=usage, - ), - ) - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. - """ - human_prompt = "\n\nuser:" - ai_prompt = "\n\nmodel:" - - content = message.content - if isinstance(content, list): - content = "".join(c.data for c in content if c.type != PromptMessageContentType.IMAGE) - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage | ToolPromptMessage): - message_text = f"{human_prompt} {content}" - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _format_message_to_glm_content(self, message: PromptMessage) -> ContentType: - """ - Format a single message into glm.Content for Google API - - :param message: one PromptMessage - :return: glm Content representation of message - """ - if isinstance(message, UserPromptMessage): - glm_content = {"role": "user", "parts": []} - if isinstance(message.content, str): - glm_content["parts"].append(to_part(message.content)) - else: - for c in message.content: - if c.type == PromptMessageContentType.TEXT: - glm_content["parts"].append(to_part(c.data)) - elif c.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, c) - if message_content.data.startswith("data:"): - metadata, base64_data = c.data.split(",", 1) - mime_type = metadata.split(";", 1)[0].split(":")[1] - else: - # fetch image data from url - try: - image_content = requests.get(message_content.data).content - with Image.open(io.BytesIO(image_content)) as img: - mime_type = f"image/{img.format.lower()}" - base64_data = base64.b64encode(image_content).decode("utf-8") - except Exception as ex: - raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}") - blob = {"inline_data": {"mime_type": mime_type, "data": base64_data}} - glm_content["parts"].append(blob) - - return glm_content - elif isinstance(message, AssistantPromptMessage): - glm_content = {"role": "model", "parts": []} - if message.content: - glm_content["parts"].append(to_part(message.content)) - if message.tool_calls: - glm_content["parts"].append( - to_part( - glm.FunctionCall( - name=message.tool_calls[0].function.name, - args=json.loads(message.tool_calls[0].function.arguments), - ) - ) - ) - return glm_content - elif isinstance(message, SystemPromptMessage): - return {"role": "user", "parts": [to_part(message.content)]} - elif isinstance(message, ToolPromptMessage): - return { - "role": "function", - "parts": [ - glm.Part( - function_response=glm.FunctionResponse( - name=message.name, response={"response": message.content} - ) - ) - 
], - } - else: - raise ValueError(f"Got unknown type {message}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [exceptions.RetryError], - InvokeServerUnavailableError: [ - exceptions.ServiceUnavailable, - exceptions.InternalServerError, - exceptions.BadGateway, - exceptions.GatewayTimeout, - exceptions.DeadlineExceeded, - ], - InvokeRateLimitError: [exceptions.ResourceExhausted, exceptions.TooManyRequests], - InvokeAuthorizationError: [ - exceptions.Unauthenticated, - exceptions.PermissionDenied, - exceptions.Unauthenticated, - exceptions.Forbidden, - ], - InvokeBadRequestError: [ - exceptions.BadRequest, - exceptions.InvalidArgument, - exceptions.FailedPrecondition, - exceptions.OutOfRange, - exceptions.NotFound, - exceptions.MethodNotAllowed, - exceptions.Conflict, - exceptions.AlreadyExists, - exceptions.Aborted, - exceptions.LengthRequired, - exceptions.PreconditionFailed, - exceptions.RequestRangeNotSatisfiable, - exceptions.Cancelled, - ], - } diff --git a/api/core/model_runtime/model_providers/groq/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/groq/_assets/icon_l_en.svg deleted file mode 100644 index 2505a5f493..0000000000 --- a/api/core/model_runtime/model_providers/groq/_assets/icon_l_en.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/groq/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/groq/_assets/icon_s_en.svg deleted file mode 100644 index 087f37e471..0000000000 --- a/api/core/model_runtime/model_providers/groq/_assets/icon_s_en.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/api/core/model_runtime/model_providers/groq/groq.py b/api/core/model_runtime/model_providers/groq/groq.py deleted file mode 100644 index d0d5ff68f8..0000000000 --- a/api/core/model_runtime/model_providers/groq/groq.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class GroqProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="llama3-8b-8192", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/groq/groq.yaml b/api/core/model_runtime/model_providers/groq/groq.yaml deleted file mode 100644 index db17cc8bdd..0000000000 --- a/api/core/model_runtime/model_providers/groq/groq.yaml +++ /dev/null @@ -1,32 +0,0 @@ -provider: groq -label: - zh_Hans: GroqCloud - en_US: GroqCloud -description: - en_US: GroqCloud provides access to the Groq Cloud API, which hosts models like LLama2 and Mixtral. - zh_Hans: GroqCloud 提供对 Groq Cloud API 的访问,其中托管了 LLama2 和 Mixtral 等模型。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#F5F5F4" -help: - title: - en_US: Get your API Key from GroqCloud - zh_Hans: 从 GroqCloud 获取 API Key - url: - en_US: https://console.groq.com/ -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/groq/llm/_position.yaml b/api/core/model_runtime/model_providers/groq/llm/_position.yaml deleted file mode 100644 index be115ca920..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/_position.yaml +++ /dev/null @@ -1,7 +0,0 @@ -- llama-3.1-405b-reasoning -- llama-3.1-70b-versatile -- llama-3.1-8b-instant -- llama3-70b-8192 -- llama3-8b-8192 -- mixtral-8x7b-32768 -- llama2-70b-4096 diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-405b-reasoning.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.1-405b-reasoning.yaml deleted file mode 100644 index 217785cea2..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-405b-reasoning.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.1-405b-reasoning -label: - zh_Hans: Llama-3.1-405b-reasoning - en_US: Llama-3.1-405b-reasoning -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml deleted file mode 100644 index ab5f6ab05e..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-70b-versatile.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.1-70b-versatile -label: - zh_Hans: Llama-3.1-70b-versatile - en_US: Llama-3.1-70b-versatile -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-8b-instant.yaml 
b/api/core/model_runtime/model_providers/groq/llm/llama-3.1-8b-instant.yaml deleted file mode 100644 index a82e64532e..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.1-8b-instant.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.1-8b-instant -label: - zh_Hans: Llama-3.1-8b-instant - en_US: Llama-3.1-8b-instant -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama2-70b-4096.yaml b/api/core/model_runtime/model_providers/groq/llm/llama2-70b-4096.yaml deleted file mode 100644 index 384912b0dd..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama2-70b-4096.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama2-70b-4096 -label: - zh_Hans: Llama-2-70B-4096 - en_US: Llama-2-70B-4096 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 -pricing: - input: '0.7' - output: '0.8' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml b/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml deleted file mode 100644 index 91d0e30765..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama3-70b-8192.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama3-70b-8192 -label: - zh_Hans: Llama-3-70B-8192 - en_US: Llama-3-70B-8192 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.59' - output: '0.79' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml b/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml deleted file mode 100644 index b6154f761f..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama3-8b-8192.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama3-8b-8192 -label: - zh_Hans: Llama-3-8B-8192 - en_US: Llama-3-8B-8192 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.08' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llm.py b/api/core/model_runtime/model_providers/groq/llm/llm.py deleted file mode 100644 index 352a7b519e..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llm.py +++ /dev/null @@ -1,31 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from 
core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class GroqLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - @staticmethod - def _add_custom_parameters(credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["endpoint_url"] = "https://api.groq.com/openai/v1" diff --git a/api/core/model_runtime/model_providers/groq/llm/mixtral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/groq/llm/mixtral-8x7b-instruct-v0.1.yaml deleted file mode 100644 index 0dc6678fa2..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/mixtral-8x7b-instruct-v0.1.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: mixtral-8x7b-32768 -label: - zh_Hans: Mixtral-8x7b-Instruct-v0.1 - en_US: Mixtral-8x7b-Instruct-v0.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 20480 -pricing: - input: '0.27' - output: '0.27' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/huggingface_hub/__init__.py b/api/core/model_runtime/model_providers/huggingface_hub/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_l_en.svg deleted file mode 100644 index 70135a08de..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_l_en.svg +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_s_en.svg deleted file mode 100644 index 5a444f127f..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/_assets/icon_s_en.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/huggingface_hub/_common.py b/api/core/model_runtime/model_providers/huggingface_hub/_common.py deleted file mode 100644 index 3c4020b6ee..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/_common.py +++ /dev/null @@ -1,9 +0,0 @@ -from huggingface_hub.utils import BadRequestError, HfHubHTTPError - -from core.model_runtime.errors.invoke import InvokeBadRequestError, InvokeError - - -class _CommonHuggingfaceHub: - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return {InvokeBadRequestError: [HfHubHTTPError, BadRequestError]} diff --git a/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.py 
b/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.py deleted file mode 100644 index 54d2a2bf39..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class HuggingfaceHubProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.yaml b/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.yaml deleted file mode 100644 index 1df234cf26..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/huggingface_hub.yaml +++ /dev/null @@ -1,102 +0,0 @@ -provider: huggingface_hub -label: - en_US: Hugging Face Model -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FFF8DC" -help: - title: - en_US: Get your API key from Hugging Face Hub - zh_Hans: 从 Hugging Face Hub 获取 API Key - url: - en_US: https://huggingface.co/settings/tokens -supported_model_types: - - llm - - text-embedding -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - credential_form_schemas: - - variable: huggingfacehub_api_type - label: - en_US: Endpoint Type - zh_Hans: 端点类型 - type: radio - required: true - default: hosted_inference_api - options: - - value: hosted_inference_api - label: - en_US: Hosted Inference API - - value: inference_endpoints - label: - en_US: Inference Endpoints - - variable: huggingfacehub_api_token - label: - en_US: API Token - zh_Hans: API Token - type: secret-input - required: true - placeholder: - en_US: Enter your Hugging Face Hub API Token here - zh_Hans: 在此输入您的 Hugging Face Hub API Token - - variable: huggingface_namespace - label: - en_US: 'User Name / Organization Name' - zh_Hans: '用户名 / 组织名称' - type: text-input - required: true - placeholder: - en_US: 'Enter your User Name / Organization Name here' - zh_Hans: '在此输入您的用户名 / 组织名称' - show_on: - - variable: __model_type - value: text-embedding - - variable: huggingfacehub_api_type - value: inference_endpoints - - variable: huggingfacehub_endpoint_url - label: - en_US: Endpoint URL - zh_Hans: 端点 URL - type: text-input - required: true - placeholder: - en_US: Enter your Endpoint URL here - zh_Hans: 在此输入您的端点 URL - show_on: - - variable: huggingfacehub_api_type - value: inference_endpoints - - variable: task_type - label: - en_US: Task - zh_Hans: Task - type: select - options: - - value: text2text-generation - label: - en_US: Text-to-Text Generation - show_on: - - variable: __model_type - value: llm - - value: text-generation - label: - en_US: Text Generation - zh_Hans: 文本生成 - show_on: - - variable: __model_type - value: llm - - value: feature-extraction - label: - en_US: Feature Extraction - show_on: - - variable: __model_type - value: text-embedding - show_on: - - variable: huggingfacehub_api_type - value: inference_endpoints diff --git a/api/core/model_runtime/model_providers/huggingface_hub/llm/__init__.py b/api/core/model_runtime/model_providers/huggingface_hub/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py b/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py deleted file mode 100644 index 9d29237fdd..0000000000 --- 
a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py +++ /dev/null @@ -1,313 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from huggingface_hub import InferenceClient -from huggingface_hub.hf_api import HfApi -from huggingface_hub.utils import BadRequestError - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - ModelPropertyKey, - ModelType, - ParameterRule, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.huggingface_hub._common import _CommonHuggingfaceHub - - -class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - client = InferenceClient(token=credentials["huggingfacehub_api_token"]) - - if credentials["huggingfacehub_api_type"] == "inference_endpoints": - model = credentials["huggingfacehub_endpoint_url"] - - if "baichuan" in model.lower(): - stream = False - - response = client.text_generation( - prompt=prompt_messages[0].content, - details=True, - stream=stream, - model=model, - stop_sequences=stop, - **model_parameters, - ) - - if stream: - return self._handle_generate_stream_response(model, credentials, prompt_messages, response) - - return self._handle_generate_response(model, credentials, prompt_messages, response) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - prompt = self._convert_messages_to_prompt(prompt_messages) - return self._get_num_tokens_by_gpt2(prompt) - - def validate_credentials(self, model: str, credentials: dict) -> None: - try: - if "huggingfacehub_api_type" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type must be provided.") - - if credentials["huggingfacehub_api_type"] not in {"inference_endpoints", "hosted_inference_api"}: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type is invalid.") - - if "huggingfacehub_api_token" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Access Token must be provided.") - - if credentials["huggingfacehub_api_type"] == "inference_endpoints": - if "huggingfacehub_endpoint_url" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint URL must be provided.") - - if "task_type" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Task Type must be provided.") - elif credentials["huggingfacehub_api_type"] == "hosted_inference_api": - credentials["task_type"] = self._get_hosted_model_task_type( - credentials["huggingfacehub_api_token"], model - ) - - if 
credentials["task_type"] not in {"text2text-generation", "text-generation"}: - raise CredentialsValidateFailedError( - "Huggingface Hub Task Type must be one of text2text-generation, text-generation." - ) - - client = InferenceClient(token=credentials["huggingfacehub_api_token"]) - - if credentials["huggingfacehub_api_type"] == "inference_endpoints": - model = credentials["huggingfacehub_endpoint_url"] - - try: - client.text_generation(prompt="Who are you?", stream=True, model=model) - except BadRequestError as e: - raise CredentialsValidateFailedError( - "Only available for models running on with the `text-generation-inference`. " - "To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference." - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - model_properties={ModelPropertyKey.MODE: LLMMode.COMPLETION.value}, - parameter_rules=self._get_customizable_model_parameter_rules(), - ) - - return entity - - @staticmethod - def _get_customizable_model_parameter_rules() -> list[ParameterRule]: - temperature_rule_dict = PARAMETER_RULE_TEMPLATE.get(DefaultParameterName.TEMPERATURE).copy() - temperature_rule_dict["name"] = "temperature" - temperature_rule = ParameterRule(**temperature_rule_dict) - temperature_rule.default = 0.5 - - top_p_rule_dict = PARAMETER_RULE_TEMPLATE.get(DefaultParameterName.TOP_P).copy() - top_p_rule_dict["name"] = "top_p" - top_p_rule = ParameterRule(**top_p_rule_dict) - top_p_rule.default = 0.5 - - top_k_rule = ParameterRule( - name="top_k", - label={ - "en_US": "Top K", - "zh_Hans": "Top K", - }, - type="int", - help={ - "en_US": "The number of highest probability vocabulary tokens to keep for top-k-filtering.", - "zh_Hans": "保留的最高概率词汇标记的数量。", - }, - required=False, - default=2, - min=1, - max=10, - precision=0, - ) - - max_new_tokens = ParameterRule( - name="max_new_tokens", - label={ - "en_US": "Max New Tokens", - "zh_Hans": "最大新标记", - }, - type="int", - help={ - "en_US": "Maximum number of generated tokens.", - "zh_Hans": "生成的标记的最大数量。", - }, - required=False, - default=20, - min=1, - max=4096, - precision=0, - ) - - seed = ParameterRule( - name="seed", - label={ - "en_US": "Random sampling seed", - "zh_Hans": "随机采样种子", - }, - type="int", - help={ - "en_US": "Random sampling seed.", - "zh_Hans": "随机采样种子。", - }, - required=False, - precision=0, - ) - - repetition_penalty = ParameterRule( - name="repetition_penalty", - label={ - "en_US": "Repetition Penalty", - "zh_Hans": "重复惩罚", - }, - type="float", - help={ - "en_US": "The parameter for repetition penalty. 
1.0 means no penalty.", - "zh_Hans": "重复惩罚的参数。1.0 表示没有惩罚。", - }, - required=False, - precision=1, - ) - - return [temperature_rule, top_k_rule, top_p_rule, max_new_tokens, seed, repetition_penalty] - - def _handle_generate_stream_response( - self, model: str, credentials: dict, prompt_messages: list[PromptMessage], response: Generator - ) -> Generator: - index = -1 - for chunk in response: - # skip special tokens - if chunk.token.special: - continue - - index += 1 - - assistant_prompt_message = AssistantPromptMessage(content=chunk.token.text) - - if chunk.details: - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - usage=usage, - finish_reason=chunk.details.finish_reason, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ), - ) - - def _handle_generate_response( - self, model: str, credentials: dict, prompt_messages: list[PromptMessage], response: Any - ) -> LLMResult: - if isinstance(response, str): - content = response - else: - content = response.generated_text - - assistant_prompt_message = AssistantPromptMessage(content=content) - - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - return result - - @staticmethod - def _get_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str): - hf_api = HfApi(token=huggingfacehub_api_token) - model_info = hf_api.model_info(repo_id=model_name) - - try: - if not model_info: - raise ValueError(f"Model {model_name} not found.") - - if "inference" in model_info.cardData and not model_info.cardData["inference"]: - raise ValueError(f"Inference API has been turned off for this model {model_name}.") - - valid_tasks = ("text2text-generation", "text-generation") - if model_info.pipeline_tag not in valid_tasks: - raise ValueError(f"Model {model_name} does not have a valid task; it must be one of {valid_tasks}.") - except Exception as e: - raise CredentialsValidateFailedError(str(e)) - - return model_info.pipeline_tag - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - return text.rstrip() - - @staticmethod - def _convert_one_message_to_text(message: PromptMessage) -> str: - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - content = message.content - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = content - else: - raise ValueError(f"Got unknown type {message}") - - return message_text
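For reference, the streaming contract that the deleted `_handle_generate_stream_response` relied on can be reproduced against `InferenceClient` directly. A minimal sketch mirroring the handler's usage of `details=True, stream=True`; the token and model id are hypothetical placeholders:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(token="hf_...")  # hypothetical API token

stream = client.text_generation(
    prompt="Who are you?",
    details=True,   # attach token metadata (and details on the final chunk)
    stream=True,
    model="HuggingFaceH4/zephyr-7b-beta",  # placeholder: any TGI-served model id or endpoint URL
    stop_sequences=["\n\nHuman:"],
    max_new_tokens=64,
)

for chunk in stream:
    if chunk.token.special:  # skip <s>, </s> etc., exactly as the deleted handler does
        continue
    print(chunk.token.text, end="", flush=True)
    if chunk.details:        # only the final chunk carries details
        print(f"\n[finish_reason={chunk.details.finish_reason}]")
```

diff --git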
a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/__init__.py b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/huggingface_tei/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py deleted file mode 100644 index 97d7e28dc6..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class HuggingfaceTeiProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml b/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml deleted file mode 100644 index f3a912d84d..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_tei/huggingface_tei.yaml +++ /dev/null @@ -1,36 +0,0 @@ -provider: huggingface_tei -label: - en_US: Text Embedding Inference -description: - en_US: A blazing fast inference solution for text embeddings models. - zh_Hans: 用于文本嵌入模型的超快速推理解决方案。 -background: "#FFF8DC" -help: - title: - en_US: How to deploy Text Embedding Inference - zh_Hans: 如何部署 Text Embedding Inference - url: - en_US: https://github.com/huggingface/text-embeddings-inference -supported_model_types: - - text-embedding - - rerank -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: server_url - label: - zh_Hans: 服务器URL - en_US: Server url - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入Text Embedding Inference的服务器地址,如 http://192.168.1.100:8080 - en_US: Enter the url of your Text Embedding Inference, e.g. 
http://192.168.1.100:8080 diff --git a/api/core/model_runtime/model_providers/huggingface_tei/rerank/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py b/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py deleted file mode 100644 index 74a1dfc3ff..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_tei/rerank/rerank.py +++ /dev/null @@ -1,136 +0,0 @@ -from typing import Optional - -import httpx - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel -from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiHelper - - -class HuggingfaceTeiRerankModel(RerankModel): - """ - Model class for Text Embedding Inference rerank model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - server_url = credentials["server_url"] - - server_url = server_url.removesuffix("/") - - try: - results = TeiHelper.invoke_rerank(server_url, query, docs) - - rerank_documents = [] - for result in results: - rerank_document = RerankDocument( - index=result["index"], - text=result["text"], - score=result["score"], - ) - if score_threshold is None or result["score"] >= score_threshold: - rerank_documents.append(rerank_document) - if top_n is not None and len(rerank_documents) >= top_n: - break - - return RerankResult(model=model, docs=rerank_documents) - except httpx.HTTPStatusError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - server_url = credentials["server_url"] - extra_args = TeiHelper.get_tei_extra_parameter(server_url, model) - if extra_args.model_type != "reranker": - raise CredentialsValidateFailedError("Current model is not a rerank model") - - credentials["context_size"] = extra_args.max_input_length - - self.invoke( - model=model, - credentials=credentials, - query="Whose kasumi", - docs=[ - 'Kasumi is a girl\'s name of Japanese origin meaning "mist".', - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ", - "and she leads a team named PopiParty.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def 
_invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.RERANK, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), - }, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py deleted file mode 100644 index 81ab249214..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py +++ /dev/null @@ -1,182 +0,0 @@ -from threading import Lock -from time import time -from typing import Optional - -import httpx -from requests.adapters import HTTPAdapter -from requests.exceptions import ConnectionError, MissingSchema, Timeout -from requests.sessions import Session -from yarl import URL - - -class TeiModelExtraParameter: - model_type: str - max_input_length: int - max_client_batch_size: int - - def __init__(self, model_type: str, max_input_length: int, max_client_batch_size: Optional[int] = None) -> None: - self.model_type = model_type - self.max_input_length = max_input_length - self.max_client_batch_size = max_client_batch_size - - -cache = {} -cache_lock = Lock() - - -class TeiHelper: - @staticmethod - def get_tei_extra_parameter(server_url: str, model_name: str) -> TeiModelExtraParameter: - TeiHelper._clean_cache() - with cache_lock: - if model_name not in cache: - cache[model_name] = { - "expires": time() + 300, - "value": TeiHelper._get_tei_extra_parameter(server_url), - } - return cache[model_name]["value"] - - @staticmethod - def _clean_cache() -> None: - try: - with cache_lock: - expired_keys = [model_uid for model_uid, model in cache.items() if model["expires"] < time()] - for model_uid in expired_keys: - del cache[model_uid] - except RuntimeError as e: - pass - - @staticmethod - def _get_tei_extra_parameter(server_url: str) -> TeiModelExtraParameter: - """ - get tei model extra parameter like model_type, max_input_length, max_batch_requests - """ - - url = str(URL(server_url) / "info") - - # this method is surrounded by a lock, and default requests may hang forever, - # so we just set a Adapter with max_retries=3 - session = Session() - session.mount("http://", HTTPAdapter(max_retries=3)) - session.mount("https://", HTTPAdapter(max_retries=3)) - - try: - response = session.get(url, timeout=10) - except (MissingSchema, ConnectionError, Timeout) as e: - raise RuntimeError(f"get tei model extra parameter failed, url: {url}, error: {e}") - if response.status_code != 200: - raise RuntimeError( - f"get tei model extra parameter failed, status code: {response.status_code}, response: {response.text}" - ) - - response_json 
= response.json() - - model_type = response_json.get("model_type", {}) - if len(model_type.keys()) < 1: - raise RuntimeError("model_type is empty") - model_type = list(model_type.keys())[0] - if model_type not in {"embedding", "reranker"}: - raise RuntimeError(f"invalid model_type: {model_type}") - - max_input_length = response_json.get("max_input_length", 512) - max_client_batch_size = response_json.get("max_client_batch_size", 1) - - return TeiModelExtraParameter( - model_type=model_type, max_input_length=max_input_length, max_client_batch_size=max_client_batch_size - ) - - @staticmethod - def invoke_tokenize(server_url: str, texts: list[str]) -> list[list[dict]]: - """ - Invoke tokenize endpoint - - Example response: - [ - [ - { - "id": 0, - "text": "", - "special": true, - "start": null, - "stop": null - }, - { - "id": 7704, - "text": "str", - "special": false, - "start": 0, - "stop": 3 - }, - < MORE TOKENS > - ] - ] - - :param server_url: server url - :param texts: texts to tokenize - """ - resp = httpx.post( - f"{server_url}/tokenize", - json={"inputs": texts}, - ) - resp.raise_for_status() - return resp.json() - - @staticmethod - def invoke_embeddings(server_url: str, texts: list[str]) -> dict: - """ - Invoke embeddings endpoint - - Example response: - { - "object": "list", - "data": [ - { - "object": "embedding", - "embedding": [...], - "index": 0 - } - ], - "model": "MODEL_NAME", - "usage": { - "prompt_tokens": 3, - "total_tokens": 3 - } - } - - :param server_url: server url - :param texts: texts to embed - """ - # Use OpenAI compatible API here, which has usage tracking - resp = httpx.post( - f"{server_url}/v1/embeddings", - json={"input": texts}, - ) - resp.raise_for_status() - return resp.json() - - @staticmethod - def invoke_rerank(server_url: str, query: str, docs: list[str]) -> list[dict]: - """ - Invoke rerank endpoint - - Example response: - [ - { - "index": 0, - "text": "Deep Learning is ...", - "score": 0.9950755 - } - ] - - :param server_url: server url - :param query: search query - :param docs: documents to rerank - """ - params = {"query": query, "texts": docs, "return_text": True} - - response = httpx.post( - server_url + "/rerank", - json=params, - ) - response.raise_for_status() - return response.json()
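The two TEI endpoints this helper wrapped are straightforward to exercise directly. A minimal sketch of `/info` (used above to classify the deployment as "embedding" or "reranker") and `/rerank`; the server URL is a hypothetical placeholder:

```python
import httpx

server_url = "http://192.168.1.100:8080"  # placeholder: a reachable TEI deployment

# /info reports the served model's type and limits
info = httpx.get(f"{server_url}/info", timeout=10).json()
print(list(info["model_type"].keys())[0], info.get("max_input_length", 512))

# /rerank scores candidate texts against a query
resp = httpx.post(
    f"{server_url}/rerank",
    json={"query": "What is Deep Learning?", "texts": ["Deep Learning is ...", "Paris is ..."], "return_text": True},
)
resp.raise_for_status()
for item in resp.json():  # [{"index": 0, "text": "...", "score": 0.99}, ...]
    print(item["index"], round(item["score"], 4))
```

diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/__init__.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/hunyuan/__init__.py b/api/core/model_runtime/model_providers/hunyuan/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/hunyuan/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/hunyuan/_assets/icon_l_en.png deleted file mode 100644 index 1303055ef8..0000000000 Binary files a/api/core/model_runtime/model_providers/hunyuan/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/hunyuan/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/hunyuan/_assets/icon_s_en.png deleted file mode 100644 index 53f4462843..0000000000 Binary files a/api/core/model_runtime/model_providers/hunyuan/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/hunyuan/hunyuan.py b/api/core/model_runtime/model_providers/hunyuan/hunyuan.py deleted file mode 100644 index e65772e7dd..0000000000 ---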
a/api/core/model_runtime/model_providers/hunyuan/hunyuan.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class HunyuanProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `hunyuan-standard` model for validate, - model_instance.validate_credentials(model="hunyuan-standard", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml b/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml deleted file mode 100644 index 812b51ddcd..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml +++ /dev/null @@ -1,41 +0,0 @@ -provider: hunyuan -label: - zh_Hans: 腾讯混元 - en_US: Hunyuan -description: - en_US: Models provided by Tencent Hunyuan, such as hunyuan-standard, hunyuan-standard-256k, hunyuan-pro and hunyuan-lite. - zh_Hans: 腾讯混元提供的模型,例如 hunyuan-standard、 hunyuan-standard-256k, hunyuan-pro 和 hunyuan-lite。 -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#F6F7F7" -help: - title: - en_US: Get your API Key from Tencent Hunyuan - zh_Hans: 从腾讯混元获取 API Key - url: - en_US: https://console.cloud.tencent.com/cam/capi -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: secret_id - label: - en_US: Secret ID - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 Secret ID - en_US: Enter your Secret ID - - variable: secret_key - label: - en_US: Secret Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 Secret Key - en_US: Enter your Secret Key diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/__init__.py b/api/core/model_runtime/model_providers/hunyuan/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml deleted file mode 100644 index f494984443..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- hunyuan-lite -- hunyuan-standard -- hunyuan-standard-256k -- hunyuan-pro -- hunyuan-turbo -- hunyuan-vision diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-lite.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-lite.yaml deleted file mode 100644 index 4f5a5dfb48..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-lite.yaml +++ /dev/null @@ -1,28 +0,0 @@ -model: hunyuan-lite -label: - zh_Hans: hunyuan-lite - en_US: hunyuan-lite -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 256000 -parameter_rules: - - name: 
temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 256000 -pricing: - input: '0.00' - output: '0.00' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-pro.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-pro.yaml deleted file mode 100644 index b173ffbe77..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-pro.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: hunyuan-pro -label: - zh_Hans: hunyuan-pro - en_US: hunyuan-pro -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 32000 - - name: enable_enhance - label: - zh_Hans: 功能增强 - en_US: Enable Enhancement - type: boolean - help: - zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false - default: true -pricing: - input: '0.03' - output: '0.10' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard-256k.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard-256k.yaml deleted file mode 100644 index 1f94a8623b..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard-256k.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: hunyuan-standard-256k -label: - zh_Hans: hunyuan-standard-256k - en_US: hunyuan-standard-256k -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 256000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 256000 - - name: enable_enhance - label: - zh_Hans: 功能增强 - en_US: Enable Enhancement - type: boolean - help: - zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。 - en_US: Allow the model to perform external search to enhance the generation results. 
- required: false - default: true -pricing: - input: '0.015' - output: '0.06' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard.yaml deleted file mode 100644 index 1db25930fc..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-standard.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: hunyuan-standard -label: - zh_Hans: hunyuan-standard - en_US: hunyuan-standard -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 32000 - - name: enable_enhance - label: - zh_Hans: 功能增强 - en_US: Enable Enhancement - type: boolean - help: - zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false - default: true -pricing: - input: '0.0045' - output: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo.yaml deleted file mode 100644 index 4837fed4ba..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: hunyuan-turbo -label: - zh_Hans: hunyuan-turbo - en_US: hunyuan-turbo -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 32000 - - name: enable_enhance - label: - zh_Hans: 功能增强 - en_US: Enable Enhancement - type: boolean - help: - zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。 - en_US: Allow the model to perform external search to enhance the generation results. - required: false - default: true -pricing: - input: '0.015' - output: '0.05' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-vision.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-vision.yaml deleted file mode 100644 index 9edc7f4710..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-vision.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: hunyuan-vision -label: - zh_Hans: hunyuan-vision - en_US: hunyuan-vision -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: enable_enhance - label: - zh_Hans: 功能增强 - en_US: Enable Enhancement - type: boolean - help: - zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。 - en_US: Allow the model to perform external search to enhance the generation results. 
- required: false - default: true -pricing: - input: '0.018' - output: '0.018' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py deleted file mode 100644 index 2014de8516..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py +++ /dev/null @@ -1,348 +0,0 @@ -import json -import logging -from collections.abc import Generator -from typing import cast - -from tencentcloud.common import credential -from tencentcloud.common.exception import TencentCloudSDKException -from tencentcloud.common.profile.client_profile import ClientProfile -from tencentcloud.common.profile.http_profile import HttpProfile -from tencentcloud.hunyuan.v20230901 import hunyuan_client, models - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import InvokeError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - - -class HunyuanLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - client = self._setup_hunyuan_client(credentials) - request = models.ChatCompletionsRequest() - messages_dict = self._convert_prompt_messages_to_dicts(prompt_messages) - - custom_parameters = { - "Temperature": model_parameters.get("temperature", 0.0), - "TopP": model_parameters.get("top_p", 1.0), - "EnableEnhancement": model_parameters.get("enable_enhance", True), - } - - params = { - "Model": model, - "Messages": messages_dict, - "Stream": stream, - **custom_parameters, - } - # add Tools and ToolChoice - if tools and len(tools) > 0: - params["ToolChoice"] = "auto" - params["Tools"] = [ - { - "Type": "function", - "Function": { - "Name": tool.name, - "Description": tool.description, - "Parameters": json.dumps(tool.parameters), - }, - } - for tool in tools - ] - - request.from_json_string(json.dumps(params)) - response = client.ChatCompletions(request) - - if stream: - return self._handle_stream_chat_response(model, credentials, prompt_messages, response) - - return self._handle_chat_response(credentials, model, prompt_messages, response) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate credentials - """ - try: - client = self._setup_hunyuan_client(credentials) - - req = models.ChatCompletionsRequest() - params = { - "Model": model, - "Messages": [{"Role": "user", "Content": "hello"}], - "TopP": 1, - "Temperature": 0, - "Stream": False, - } - req.from_json_string(json.dumps(params)) - client.ChatCompletions(req) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - def _setup_hunyuan_client(self, credentials): - secret_id = credentials["secret_id"] - secret_key = credentials["secret_key"] - cred = credential.Credential(secret_id, 
secret_key) - httpProfile = HttpProfile() - httpProfile.endpoint = "hunyuan.tencentcloudapi.com" - clientProfile = ClientProfile() - clientProfile.httpProfile = httpProfile - client = hunyuan_client.HunyuanClient(cred, "", clientProfile) - return client - - def _convert_prompt_messages_to_dicts(self, prompt_messages: list[PromptMessage]) -> list[dict]: - """Convert a list of PromptMessage objects to a list of dictionaries with 'Role' and 'Content' keys.""" - dict_list = [] - for message in prompt_messages: - if isinstance(message, AssistantPromptMessage): - tool_calls = message.tool_calls - if tool_calls and len(tool_calls) > 0: - dict_tool_calls = [ - { - "Id": tool_call.id, - "Type": tool_call.type, - "Function": { - "Name": tool_call.function.name, - # fall back to "{}" only when the tool call carries no arguments - "Arguments": "{}" - if (tool_call.function.arguments == "") - else tool_call.function.arguments, - }, - } - for tool_call in tool_calls - ] - - dict_list.append( - { - "Role": message.role.value, - # fix set content = "" while tool_call request - # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter - # message:Messages Content and Contents not allowed empty at the same time. - "Content": " ", # message.content if (message.content is not None) else "", - "ToolCalls": dict_tool_calls, - } - ) - else: - dict_list.append({"Role": message.role.value, "Content": message.content}) - elif isinstance(message, ToolPromptMessage): - tool_execute_result = {"result": message.content} - content = json.dumps(tool_execute_result, ensure_ascii=False) - dict_list.append({"Role": message.role.value, "Content": content, "ToolCallId": message.tool_call_id}) - elif isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - dict_list.append({"Role": message.role.value, "Content": message.content}) - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"Type": "text", "Text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "Type": "image_url", - "ImageUrl": {"Url": message_content.data}, - } - sub_messages.append(sub_message_dict) - dict_list.append({"Role": message.role.value, "Contents": sub_messages}) - else: - dict_list.append({"Role": message.role.value, "Content": message.content}) - return dict_list - - def _handle_stream_chat_response(self, model, credentials, prompt_messages, resp): - tool_call = None - tool_calls = [] - - for index, event in enumerate(resp): - logger.debug("_handle_stream_chat_response, event: %s", event) - - data_str = event["data"] - data = json.loads(data_str) - - choices = data.get("Choices", []) - if not choices: - continue - choice = choices[0] - delta = choice.get("Delta", {}) - message_content = delta.get("Content", "") - finish_reason = choice.get("FinishReason", "") - - usage = data.get("Usage", {}) - prompt_tokens = usage.get("PromptTokens", 0) - completion_tokens = usage.get("CompletionTokens", 0) - - response_tool_calls = delta.get("ToolCalls") - if response_tool_calls is not None: - new_tool_calls = self._extract_response_tool_calls(response_tool_calls) - if len(new_tool_calls) > 0: - new_tool_call = new_tool_calls[0] - # accumulate streamed tool-call fragments; a new id starts a new call - if tool_call is None: - tool_call = new_tool_call - elif tool_call.id != new_tool_call.id:
- tool_calls.append(tool_call) - tool_call = new_tool_call - else: - tool_call.function.name += new_tool_call.function.name - tool_call.function.arguments += new_tool_call.function.arguments - if tool_call is not None and len(tool_call.function.name) > 0 and len(tool_call.function.arguments) > 0: - tool_calls.append(tool_call) - tool_call = None - - assistant_prompt_message = AssistantPromptMessage(content=message_content, tool_calls=[]) - # rewrite content = "" while tool_call to avoid show content on web page - if len(tool_calls) > 0: - assistant_prompt_message.content = "" - - # add tool_calls to assistant_prompt_message - if finish_reason == "tool_calls": - assistant_prompt_message.tool_calls = tool_calls - tool_call = None - tool_calls = [] - - if len(finish_reason) > 0: - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - delta_chunk = LLMResultChunkDelta( - index=index, - role=delta.get("Role", "assistant"), - message=assistant_prompt_message, - usage=usage, - finish_reason=finish_reason, - ) - tool_call = None - tool_calls = [] - - else: - delta_chunk = LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=delta_chunk, - ) - - def _handle_chat_response(self, credentials, model, prompt_messages, response): - usage = self._calc_response_usage( - model, credentials, response.Usage.PromptTokens, response.Usage.CompletionTokens - ) - assistant_prompt_message = AssistantPromptMessage() - assistant_prompt_message.content = response.Choices[0].Message.Content - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - if len(prompt_messages) == 0: - return 0 - prompt = self._convert_messages_to_prompt(prompt_messages) - return self._get_num_tokens_by_gpt2(prompt) - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a single prompt string (used only for token counting) - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message.
- """ - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - tool_prompt = "\n\nTool:" - content = message.content - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, ToolPromptMessage): - message_text = f"{tool_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = content - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeError: [TencentCloudSDKException], - } - - def _extract_response_tool_calls(self, response_tool_calls: list[dict]) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - response_function = response_tool_call.get("Function", {}) - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function.get("Name", ""), arguments=response_function.get("Arguments", "") - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.get("Id", 0), type="function", function=function - ) - tool_calls.append(tool_call) - - return tool_calls diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/__init__.py b/api/core/model_runtime/model_providers/hunyuan/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/hunyuan-text-embedding.yaml b/api/core/model_runtime/model_providers/hunyuan/text_embedding/hunyuan-text-embedding.yaml deleted file mode 100644 index ab014e4344..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/text_embedding/hunyuan-text-embedding.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: hunyuan-embedding -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/jina/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/jina/_assets/icon_l_en.svg deleted file mode 100644 index 6a241fc9ae..0000000000 --- a/api/core/model_runtime/model_providers/jina/_assets/icon_l_en.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/jina/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/jina/_assets/icon_s_en.svg deleted file mode 100644 index 2e1b00fa52..0000000000 --- a/api/core/model_runtime/model_providers/jina/_assets/icon_s_en.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/api/core/model_runtime/model_providers/jina/jina.py b/api/core/model_runtime/model_providers/jina/jina.py deleted file mode 100644 index 186a0a0fa7..0000000000 --- a/api/core/model_runtime/model_providers/jina/jina.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import 
ModelProvider - -logger = logging.getLogger(__name__) - - -class JinaProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials; raise an exception on failure - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING) - - # Validate with the `jina-embeddings-v3` model, - # regardless of which embedding model is configured - model_instance.validate_credentials(model="jina-embeddings-v3", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/jina/rerank/__init__.py b/api/core/model_runtime/model_providers/jina/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/jina/rerank/_position.yaml b/api/core/model_runtime/model_providers/jina/rerank/_position.yaml deleted file mode 100644 index c9ddaad758..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/_position.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- jina-reranker-v2-base-multilingual -- jina-reranker-v1-base-en -- jina-reranker-v1-turbo-en -- jina-colbert-v1-en -- jina-reranker-v1-tiny-en diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-colbert-v1-en.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-colbert-v1-en.yaml deleted file mode 100644 index 320370f242..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/jina-colbert-v1-en.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: jina-colbert-v1-en -model_type: rerank -model_properties: - context_size: 8192 diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-base-en.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-base-en.yaml deleted file mode 100644 index bd3f31fbd1..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-base-en.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: jina-reranker-v1-base-en -model_type: rerank -model_properties: - context_size: 8192 diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-tiny-en.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-tiny-en.yaml deleted file mode 100644 index b81711195b..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-tiny-en.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: jina-reranker-v1-tiny-en -model_type: rerank -model_properties: - context_size: 8192 diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-turbo-en.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-turbo-en.yaml deleted file mode 100644 index d05f4bb4a2..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v1-turbo-en.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: jina-reranker-v1-turbo-en -model_type: rerank -model_properties: - context_size: 8192
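Several providers removed in this PR (Jina here, Hunyuan above) share the same credential-validation pattern: delegate to one representative model instance and let its `validate_credentials` do the work. A condensed sketch of that pattern; `MyProvider` and the model name are hypothetical stand-ins:

```python
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class MyProvider(ModelProvider):  # hypothetical provider
    def validate_provider_credentials(self, credentials: dict) -> None:
        try:
            model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING)
            # any representative model of the provider works here
            model_instance.validate_credentials(model="my-embedding-model", credentials=credentials)
        except CredentialsValidateFailedError:
            raise  # surface validation failures unchanged
        except Exception:
            logger.exception(f"{self.get_provider_schema().provider} credentials validate failed")
            raise
```

diff --git a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml b/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml deleted file mode 100644 index e6af62107e..0000000000 ---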
a/api/core/model_runtime/model_providers/jina/rerank/jina-reranker-v2-base-multilingual.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: jina-reranker-v2-base-multilingual -model_type: rerank -model_properties: - context_size: 1024 diff --git a/api/core/model_runtime/model_providers/jina/rerank/rerank.py b/api/core/model_runtime/model_providers/jina/rerank/rerank.py deleted file mode 100644 index 79ca68914f..0000000000 --- a/api/core/model_runtime/model_providers/jina/rerank/rerank.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Optional - -import httpx - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class JinaRerankModel(RerankModel): - """ - Model class for Jina rerank model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n documents to return - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - base_url = credentials.get("base_url", "https://api.jina.ai/v1") - base_url = base_url.removesuffix("/") - - try: - response = httpx.post( - base_url + "/rerank", - json={"model": model, "query": query, "documents": docs, "top_n": top_n}, - headers={"Authorization": f"Bearer {credentials.get('api_key')}"}, - ) - response.raise_for_status() - results = response.json() - - rerank_documents = [] - for result in results["results"]: - rerank_document = RerankDocument( - index=result["index"], - text=result["document"]["text"], - score=result["relevance_score"], - ) - if score_threshold is None or result["relevance_score"] >= score_threshold: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - except httpx.HTTPStatusError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. 
Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - """ - return { - InvokeConnectionError: [httpx.ConnectError], - InvokeServerUnavailableError: [httpx.RemoteProtocolError], - InvokeRateLimitError: [], - InvokeAuthorizationError: [httpx.HTTPStatusError], - InvokeBadRequestError: [httpx.RequestError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.RERANK, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - # assumed fallback: default to 8192 (shared by most Jina rerankers) instead of crashing on int(None) - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8192))}, - ) - - return entity
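Stripped of the entity plumbing, the deleted `JinaRerankModel._invoke` boils down to one HTTP call. A sketch of the request/response shape it depended on; the API key is a hypothetical placeholder:

```python
import httpx

base_url = "https://api.jina.ai/v1"
api_key = "jina_..."  # hypothetical API key

response = httpx.post(
    base_url + "/rerank",
    json={
        "model": "jina-reranker-v2-base-multilingual",
        "query": "What is the capital of the United States?",
        "documents": ["Carson City is the capital city of Nevada.", "Saipan is the capital of the CNMI."],
        "top_n": 2,
    },
    headers={"Authorization": f"Bearer {api_key}"},
)
response.raise_for_status()
for result in response.json()["results"]:
    # each result carries the original index, the document text, and a relevance score
    print(result["index"], result["relevance_score"], result["document"]["text"][:40])
```

diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-clip-v1.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-clip-v1.yaml deleted file mode 100644 index c06bfd7ebe..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-clip-v1.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-clip-v1 -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-de.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-de.yaml deleted file mode 100644 index 09f7023acb..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-de.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-embeddings-v2-base-de -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-en.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-en.yaml deleted file mode 100644 index a9b2cd4efb..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-en.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-embeddings-v2-base-en -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-zh.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-zh.yaml deleted file mode 100644 index 2a66b4729b..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-base-zh.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-embeddings-v2-base-zh -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-small-en.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-small-en.yaml deleted file mode 100644 index c92779d499..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v2-small-en.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-embeddings-v2-small-en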
-model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v3.yaml b/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v3.yaml deleted file mode 100644 index 4e5374dc9d..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina-embeddings-v3.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: jina-embeddings-v3 -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 2048 -pricing: - input: '0.001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/jina_tokenizer.py b/api/core/model_runtime/model_providers/jina/text_embedding/jina_tokenizer.py deleted file mode 100644 index d80cbfa83d..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/jina_tokenizer.py +++ /dev/null @@ -1,32 +0,0 @@ -from os.path import abspath, dirname, join -from threading import Lock - -from transformers import AutoTokenizer - - -class JinaTokenizer: - _tokenizer = None - _lock = Lock() - - @classmethod - def _get_tokenizer(cls): - if cls._tokenizer is None: - with cls._lock: - if cls._tokenizer is None: - base_path = abspath(__file__) - gpt2_tokenizer_path = join(dirname(base_path), "tokenizer") - cls._tokenizer = AutoTokenizer.from_pretrained(gpt2_tokenizer_path) - return cls._tokenizer - - @classmethod - def _get_num_tokens_by_jina_base(cls, text: str) -> int: - """ - use jina tokenizer to get num tokens - """ - tokenizer = cls._get_tokenizer() - tokens = tokenizer.encode(text) - return len(tokens) - - @classmethod - def get_num_tokens(cls, text: str) -> int: - return cls._get_num_tokens_by_jina_base(text) diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer.json b/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer.json deleted file mode 100644 index 65f2b2bc19..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer.json +++ /dev/null @@ -1,30678 +0,0 @@ -{ - "version": "1.0", - "truncation": null, - "padding": null, - "added_tokens": [ - { - "id": 0, - "content": "[PAD]", - "single_word": false, - "lstrip": false, - "rstrip": false, - "normalized": false, - "special": true - }, - { - "id": 100, - "content": "[UNK]", - "single_word": false, - "lstrip": false, - "rstrip": false, - "normalized": false, - "special": true - }, - { - "id": 101, - "content": "[CLS]", - "single_word": false, - "lstrip": false, - "rstrip": false, - "normalized": false, - "special": true - }, - { - "id": 102, - "content": "[SEP]", - "single_word": false, - "lstrip": false, - "rstrip": false, - "normalized": false, - "special": true - }, - { - "id": 103, - "content": "[MASK]", - "single_word": false, - "lstrip": false, - "rstrip": false, - "normalized": false, - "special": true - } - ], - "normalizer": { - "type": "BertNormalizer", - "clean_text": true, - "handle_chinese_chars": true, - "strip_accents": null, - "lowercase": true - }, - "pre_tokenizer": { - "type": "BertPreTokenizer" - }, - "post_processor": { - "type": "TemplateProcessing", - "single": [ - { - "SpecialToken": { - "id": "[CLS]", - "type_id": 0 - } - }, - { - "Sequence": { - "id": "A", - "type_id": 0 - } - }, - { - "SpecialToken": { - "id": "[SEP]", - "type_id": 0 - } - } - ], - "pair": [ - { - "SpecialToken": { - "id": "[CLS]", - "type_id": 0 - } - }, - { - 
"Sequence": { - "id": "A", - "type_id": 0 - } - }, - { - "SpecialToken": { - "id": "[SEP]", - "type_id": 0 - } - }, - { - "Sequence": { - "id": "B", - "type_id": 1 - } - }, - { - "SpecialToken": { - "id": "[SEP]", - "type_id": 1 - } - } - ], - "special_tokens": { - "[CLS]": { - "id": "[CLS]", - "ids": [ - 101 - ], - "tokens": [ - "[CLS]" - ] - }, - "[SEP]": { - "id": "[SEP]", - "ids": [ - 102 - ], - "tokens": [ - "[SEP]" - ] - } - } - }, - "decoder": { - "type": "WordPiece", - "prefix": "##", - "cleanup": true - }, - "model": { - "type": "WordPiece", - "unk_token": "[UNK]", - "continuing_subword_prefix": "##", - "max_input_chars_per_word": 100, - "vocab": { - "[PAD]": 0, - "[unused0]": 1, - "[unused1]": 2, - "[unused2]": 3, - "[unused3]": 4, - "[unused4]": 5, - "[unused5]": 6, - "[unused6]": 7, - "[unused7]": 8, - "[unused8]": 9, - "[unused9]": 10, - "[unused10]": 11, - "[unused11]": 12, - "[unused12]": 13, - "[unused13]": 14, - "[unused14]": 15, - "[unused15]": 16, - "[unused16]": 17, - "[unused17]": 18, - "[unused18]": 19, - "[unused19]": 20, - "[unused20]": 21, - "[unused21]": 22, - "[unused22]": 23, - "[unused23]": 24, - "[unused24]": 25, - "[unused25]": 26, - "[unused26]": 27, - "[unused27]": 28, - "[unused28]": 29, - "[unused29]": 30, - "[unused30]": 31, - "[unused31]": 32, - "[unused32]": 33, - "[unused33]": 34, - "[unused34]": 35, - "[unused35]": 36, - "[unused36]": 37, - "[unused37]": 38, - "[unused38]": 39, - "[unused39]": 40, - "[unused40]": 41, - "[unused41]": 42, - "[unused42]": 43, - "[unused43]": 44, - "[unused44]": 45, - "[unused45]": 46, - "[unused46]": 47, - "[unused47]": 48, - "[unused48]": 49, - "[unused49]": 50, - "[unused50]": 51, - "[unused51]": 52, - "[unused52]": 53, - "[unused53]": 54, - "[unused54]": 55, - "[unused55]": 56, - "[unused56]": 57, - "[unused57]": 58, - "[unused58]": 59, - "[unused59]": 60, - "[unused60]": 61, - "[unused61]": 62, - "[unused62]": 63, - "[unused63]": 64, - "[unused64]": 65, - "[unused65]": 66, - "[unused66]": 67, - "[unused67]": 68, - "[unused68]": 69, - "[unused69]": 70, - "[unused70]": 71, - "[unused71]": 72, - "[unused72]": 73, - "[unused73]": 74, - "[unused74]": 75, - "[unused75]": 76, - "[unused76]": 77, - "[unused77]": 78, - "[unused78]": 79, - "[unused79]": 80, - "[unused80]": 81, - "[unused81]": 82, - "[unused82]": 83, - "[unused83]": 84, - "[unused84]": 85, - "[unused85]": 86, - "[unused86]": 87, - "[unused87]": 88, - "[unused88]": 89, - "[unused89]": 90, - "[unused90]": 91, - "[unused91]": 92, - "[unused92]": 93, - "[unused93]": 94, - "[unused94]": 95, - "[unused95]": 96, - "[unused96]": 97, - "[unused97]": 98, - "[unused98]": 99, - "[UNK]": 100, - "[CLS]": 101, - "[SEP]": 102, - "[MASK]": 103, - "[unused99]": 104, - "[unused100]": 105, - "[unused101]": 106, - "[unused102]": 107, - "[unused103]": 108, - "[unused104]": 109, - "[unused105]": 110, - "[unused106]": 111, - "[unused107]": 112, - "[unused108]": 113, - "[unused109]": 114, - "[unused110]": 115, - "[unused111]": 116, - "[unused112]": 117, - "[unused113]": 118, - "[unused114]": 119, - "[unused115]": 120, - "[unused116]": 121, - "[unused117]": 122, - "[unused118]": 123, - "[unused119]": 124, - "[unused120]": 125, - "[unused121]": 126, - "[unused122]": 127, - "[unused123]": 128, - "[unused124]": 129, - "[unused125]": 130, - "[unused126]": 131, - "[unused127]": 132, - "[unused128]": 133, - "[unused129]": 134, - "[unused130]": 135, - "[unused131]": 136, - "[unused132]": 137, - "[unused133]": 138, - "[unused134]": 139, - "[unused135]": 140, - "[unused136]": 141, - 
"[unused137]": 142, - "[unused138]": 143, - "[unused139]": 144, - "[unused140]": 145, - "[unused141]": 146, - "[unused142]": 147, - "[unused143]": 148, - "[unused144]": 149, - "[unused145]": 150, - "[unused146]": 151, - "[unused147]": 152, - "[unused148]": 153, - "[unused149]": 154, - "[unused150]": 155, - "[unused151]": 156, - "[unused152]": 157, - "[unused153]": 158, - "[unused154]": 159, - "[unused155]": 160, - "[unused156]": 161, - "[unused157]": 162, - "[unused158]": 163, - "[unused159]": 164, - "[unused160]": 165, - "[unused161]": 166, - "[unused162]": 167, - "[unused163]": 168, - "[unused164]": 169, - "[unused165]": 170, - "[unused166]": 171, - "[unused167]": 172, - "[unused168]": 173, - "[unused169]": 174, - "[unused170]": 175, - "[unused171]": 176, - "[unused172]": 177, - "[unused173]": 178, - "[unused174]": 179, - "[unused175]": 180, - "[unused176]": 181, - "[unused177]": 182, - "[unused178]": 183, - "[unused179]": 184, - "[unused180]": 185, - "[unused181]": 186, - "[unused182]": 187, - "[unused183]": 188, - "[unused184]": 189, - "[unused185]": 190, - "[unused186]": 191, - "[unused187]": 192, - "[unused188]": 193, - "[unused189]": 194, - "[unused190]": 195, - "[unused191]": 196, - "[unused192]": 197, - "[unused193]": 198, - "[unused194]": 199, - "[unused195]": 200, - "[unused196]": 201, - "[unused197]": 202, - "[unused198]": 203, - "[unused199]": 204, - "[unused200]": 205, - "[unused201]": 206, - "[unused202]": 207, - "[unused203]": 208, - "[unused204]": 209, - "[unused205]": 210, - "[unused206]": 211, - "[unused207]": 212, - "[unused208]": 213, - "[unused209]": 214, - "[unused210]": 215, - "[unused211]": 216, - "[unused212]": 217, - "[unused213]": 218, - "[unused214]": 219, - "[unused215]": 220, - "[unused216]": 221, - "[unused217]": 222, - "[unused218]": 223, - "[unused219]": 224, - "[unused220]": 225, - "[unused221]": 226, - "[unused222]": 227, - "[unused223]": 228, - "[unused224]": 229, - "[unused225]": 230, - "[unused226]": 231, - "[unused227]": 232, - "[unused228]": 233, - "[unused229]": 234, - "[unused230]": 235, - "[unused231]": 236, - "[unused232]": 237, - "[unused233]": 238, - "[unused234]": 239, - "[unused235]": 240, - "[unused236]": 241, - "[unused237]": 242, - "[unused238]": 243, - "[unused239]": 244, - "[unused240]": 245, - "[unused241]": 246, - "[unused242]": 247, - "[unused243]": 248, - "[unused244]": 249, - "[unused245]": 250, - "[unused246]": 251, - "[unused247]": 252, - "[unused248]": 253, - "[unused249]": 254, - "[unused250]": 255, - "[unused251]": 256, - "[unused252]": 257, - "[unused253]": 258, - "[unused254]": 259, - "[unused255]": 260, - "[unused256]": 261, - "[unused257]": 262, - "[unused258]": 263, - "[unused259]": 264, - "[unused260]": 265, - "[unused261]": 266, - "[unused262]": 267, - "[unused263]": 268, - "[unused264]": 269, - "[unused265]": 270, - "[unused266]": 271, - "[unused267]": 272, - "[unused268]": 273, - "[unused269]": 274, - "[unused270]": 275, - "[unused271]": 276, - "[unused272]": 277, - "[unused273]": 278, - "[unused274]": 279, - "[unused275]": 280, - "[unused276]": 281, - "[unused277]": 282, - "[unused278]": 283, - "[unused279]": 284, - "[unused280]": 285, - "[unused281]": 286, - "[unused282]": 287, - "[unused283]": 288, - "[unused284]": 289, - "[unused285]": 290, - "[unused286]": 291, - "[unused287]": 292, - "[unused288]": 293, - "[unused289]": 294, - "[unused290]": 295, - "[unused291]": 296, - "[unused292]": 297, - "[unused293]": 298, - "[unused294]": 299, - "[unused295]": 300, - "[unused296]": 301, - "[unused297]": 302, - 
"[unused298]": 303, - "[unused299]": 304, - "[unused300]": 305, - "[unused301]": 306, - "[unused302]": 307, - "[unused303]": 308, - "[unused304]": 309, - "[unused305]": 310, - "[unused306]": 311, - "[unused307]": 312, - "[unused308]": 313, - "[unused309]": 314, - "[unused310]": 315, - "[unused311]": 316, - "[unused312]": 317, - "[unused313]": 318, - "[unused314]": 319, - "[unused315]": 320, - "[unused316]": 321, - "[unused317]": 322, - "[unused318]": 323, - "[unused319]": 324, - "[unused320]": 325, - "[unused321]": 326, - "[unused322]": 327, - "[unused323]": 328, - "[unused324]": 329, - "[unused325]": 330, - "[unused326]": 331, - "[unused327]": 332, - "[unused328]": 333, - "[unused329]": 334, - "[unused330]": 335, - "[unused331]": 336, - "[unused332]": 337, - "[unused333]": 338, - "[unused334]": 339, - "[unused335]": 340, - "[unused336]": 341, - "[unused337]": 342, - "[unused338]": 343, - "[unused339]": 344, - "[unused340]": 345, - "[unused341]": 346, - "[unused342]": 347, - "[unused343]": 348, - "[unused344]": 349, - "[unused345]": 350, - "[unused346]": 351, - "[unused347]": 352, - "[unused348]": 353, - "[unused349]": 354, - "[unused350]": 355, - "[unused351]": 356, - "[unused352]": 357, - "[unused353]": 358, - "[unused354]": 359, - "[unused355]": 360, - "[unused356]": 361, - "[unused357]": 362, - "[unused358]": 363, - "[unused359]": 364, - "[unused360]": 365, - "[unused361]": 366, - "[unused362]": 367, - "[unused363]": 368, - "[unused364]": 369, - "[unused365]": 370, - "[unused366]": 371, - "[unused367]": 372, - "[unused368]": 373, - "[unused369]": 374, - "[unused370]": 375, - "[unused371]": 376, - "[unused372]": 377, - "[unused373]": 378, - "[unused374]": 379, - "[unused375]": 380, - "[unused376]": 381, - "[unused377]": 382, - "[unused378]": 383, - "[unused379]": 384, - "[unused380]": 385, - "[unused381]": 386, - "[unused382]": 387, - "[unused383]": 388, - "[unused384]": 389, - "[unused385]": 390, - "[unused386]": 391, - "[unused387]": 392, - "[unused388]": 393, - "[unused389]": 394, - "[unused390]": 395, - "[unused391]": 396, - "[unused392]": 397, - "[unused393]": 398, - "[unused394]": 399, - "[unused395]": 400, - "[unused396]": 401, - "[unused397]": 402, - "[unused398]": 403, - "[unused399]": 404, - "[unused400]": 405, - "[unused401]": 406, - "[unused402]": 407, - "[unused403]": 408, - "[unused404]": 409, - "[unused405]": 410, - "[unused406]": 411, - "[unused407]": 412, - "[unused408]": 413, - "[unused409]": 414, - "[unused410]": 415, - "[unused411]": 416, - "[unused412]": 417, - "[unused413]": 418, - "[unused414]": 419, - "[unused415]": 420, - "[unused416]": 421, - "[unused417]": 422, - "[unused418]": 423, - "[unused419]": 424, - "[unused420]": 425, - "[unused421]": 426, - "[unused422]": 427, - "[unused423]": 428, - "[unused424]": 429, - "[unused425]": 430, - "[unused426]": 431, - "[unused427]": 432, - "[unused428]": 433, - "[unused429]": 434, - "[unused430]": 435, - "[unused431]": 436, - "[unused432]": 437, - "[unused433]": 438, - "[unused434]": 439, - "[unused435]": 440, - "[unused436]": 441, - "[unused437]": 442, - "[unused438]": 443, - "[unused439]": 444, - "[unused440]": 445, - "[unused441]": 446, - "[unused442]": 447, - "[unused443]": 448, - "[unused444]": 449, - "[unused445]": 450, - "[unused446]": 451, - "[unused447]": 452, - "[unused448]": 453, - "[unused449]": 454, - "[unused450]": 455, - "[unused451]": 456, - "[unused452]": 457, - "[unused453]": 458, - "[unused454]": 459, - "[unused455]": 460, - "[unused456]": 461, - "[unused457]": 462, - "[unused458]": 463, - 
"[unused459]": 464, - "[unused460]": 465, - "[unused461]": 466, - "[unused462]": 467, - "[unused463]": 468, - "[unused464]": 469, - "[unused465]": 470, - "[unused466]": 471, - "[unused467]": 472, - "[unused468]": 473, - "[unused469]": 474, - "[unused470]": 475, - "[unused471]": 476, - "[unused472]": 477, - "[unused473]": 478, - "[unused474]": 479, - "[unused475]": 480, - "[unused476]": 481, - "[unused477]": 482, - "[unused478]": 483, - "[unused479]": 484, - "[unused480]": 485, - "[unused481]": 486, - "[unused482]": 487, - "[unused483]": 488, - "[unused484]": 489, - "[unused485]": 490, - "[unused486]": 491, - "[unused487]": 492, - "[unused488]": 493, - "[unused489]": 494, - "[unused490]": 495, - "[unused491]": 496, - "[unused492]": 497, - "[unused493]": 498, - "[unused494]": 499, - "[unused495]": 500, - "[unused496]": 501, - "[unused497]": 502, - "[unused498]": 503, - "[unused499]": 504, - "[unused500]": 505, - "[unused501]": 506, - "[unused502]": 507, - "[unused503]": 508, - "[unused504]": 509, - "[unused505]": 510, - "[unused506]": 511, - "[unused507]": 512, - "[unused508]": 513, - "[unused509]": 514, - "[unused510]": 515, - "[unused511]": 516, - "[unused512]": 517, - "[unused513]": 518, - "[unused514]": 519, - "[unused515]": 520, - "[unused516]": 521, - "[unused517]": 522, - "[unused518]": 523, - "[unused519]": 524, - "[unused520]": 525, - "[unused521]": 526, - "[unused522]": 527, - "[unused523]": 528, - "[unused524]": 529, - "[unused525]": 530, - "[unused526]": 531, - "[unused527]": 532, - "[unused528]": 533, - "[unused529]": 534, - "[unused530]": 535, - "[unused531]": 536, - "[unused532]": 537, - "[unused533]": 538, - "[unused534]": 539, - "[unused535]": 540, - "[unused536]": 541, - "[unused537]": 542, - "[unused538]": 543, - "[unused539]": 544, - "[unused540]": 545, - "[unused541]": 546, - "[unused542]": 547, - "[unused543]": 548, - "[unused544]": 549, - "[unused545]": 550, - "[unused546]": 551, - "[unused547]": 552, - "[unused548]": 553, - "[unused549]": 554, - "[unused550]": 555, - "[unused551]": 556, - "[unused552]": 557, - "[unused553]": 558, - "[unused554]": 559, - "[unused555]": 560, - "[unused556]": 561, - "[unused557]": 562, - "[unused558]": 563, - "[unused559]": 564, - "[unused560]": 565, - "[unused561]": 566, - "[unused562]": 567, - "[unused563]": 568, - "[unused564]": 569, - "[unused565]": 570, - "[unused566]": 571, - "[unused567]": 572, - "[unused568]": 573, - "[unused569]": 574, - "[unused570]": 575, - "[unused571]": 576, - "[unused572]": 577, - "[unused573]": 578, - "[unused574]": 579, - "[unused575]": 580, - "[unused576]": 581, - "[unused577]": 582, - "[unused578]": 583, - "[unused579]": 584, - "[unused580]": 585, - "[unused581]": 586, - "[unused582]": 587, - "[unused583]": 588, - "[unused584]": 589, - "[unused585]": 590, - "[unused586]": 591, - "[unused587]": 592, - "[unused588]": 593, - "[unused589]": 594, - "[unused590]": 595, - "[unused591]": 596, - "[unused592]": 597, - "[unused593]": 598, - "[unused594]": 599, - "[unused595]": 600, - "[unused596]": 601, - "[unused597]": 602, - "[unused598]": 603, - "[unused599]": 604, - "[unused600]": 605, - "[unused601]": 606, - "[unused602]": 607, - "[unused603]": 608, - "[unused604]": 609, - "[unused605]": 610, - "[unused606]": 611, - "[unused607]": 612, - "[unused608]": 613, - "[unused609]": 614, - "[unused610]": 615, - "[unused611]": 616, - "[unused612]": 617, - "[unused613]": 618, - "[unused614]": 619, - "[unused615]": 620, - "[unused616]": 621, - "[unused617]": 622, - "[unused618]": 623, - "[unused619]": 624, - 
"[unused620]": 625, - "[unused621]": 626, - "[unused622]": 627, - "[unused623]": 628, - "[unused624]": 629, - "[unused625]": 630, - "[unused626]": 631, - "[unused627]": 632, - "[unused628]": 633, - "[unused629]": 634, - "[unused630]": 635, - "[unused631]": 636, - "[unused632]": 637, - "[unused633]": 638, - "[unused634]": 639, - "[unused635]": 640, - "[unused636]": 641, - "[unused637]": 642, - "[unused638]": 643, - "[unused639]": 644, - "[unused640]": 645, - "[unused641]": 646, - "[unused642]": 647, - "[unused643]": 648, - "[unused644]": 649, - "[unused645]": 650, - "[unused646]": 651, - "[unused647]": 652, - "[unused648]": 653, - "[unused649]": 654, - "[unused650]": 655, - "[unused651]": 656, - "[unused652]": 657, - "[unused653]": 658, - "[unused654]": 659, - "[unused655]": 660, - "[unused656]": 661, - "[unused657]": 662, - "[unused658]": 663, - "[unused659]": 664, - "[unused660]": 665, - "[unused661]": 666, - "[unused662]": 667, - "[unused663]": 668, - "[unused664]": 669, - "[unused665]": 670, - "[unused666]": 671, - "[unused667]": 672, - "[unused668]": 673, - "[unused669]": 674, - "[unused670]": 675, - "[unused671]": 676, - "[unused672]": 677, - "[unused673]": 678, - "[unused674]": 679, - "[unused675]": 680, - "[unused676]": 681, - "[unused677]": 682, - "[unused678]": 683, - "[unused679]": 684, - "[unused680]": 685, - "[unused681]": 686, - "[unused682]": 687, - "[unused683]": 688, - "[unused684]": 689, - "[unused685]": 690, - "[unused686]": 691, - "[unused687]": 692, - "[unused688]": 693, - "[unused689]": 694, - "[unused690]": 695, - "[unused691]": 696, - "[unused692]": 697, - "[unused693]": 698, - "[unused694]": 699, - "[unused695]": 700, - "[unused696]": 701, - "[unused697]": 702, - "[unused698]": 703, - "[unused699]": 704, - "[unused700]": 705, - "[unused701]": 706, - "[unused702]": 707, - "[unused703]": 708, - "[unused704]": 709, - "[unused705]": 710, - "[unused706]": 711, - "[unused707]": 712, - "[unused708]": 713, - "[unused709]": 714, - "[unused710]": 715, - "[unused711]": 716, - "[unused712]": 717, - "[unused713]": 718, - "[unused714]": 719, - "[unused715]": 720, - "[unused716]": 721, - "[unused717]": 722, - "[unused718]": 723, - "[unused719]": 724, - "[unused720]": 725, - "[unused721]": 726, - "[unused722]": 727, - "[unused723]": 728, - "[unused724]": 729, - "[unused725]": 730, - "[unused726]": 731, - "[unused727]": 732, - "[unused728]": 733, - "[unused729]": 734, - "[unused730]": 735, - "[unused731]": 736, - "[unused732]": 737, - "[unused733]": 738, - "[unused734]": 739, - "[unused735]": 740, - "[unused736]": 741, - "[unused737]": 742, - "[unused738]": 743, - "[unused739]": 744, - "[unused740]": 745, - "[unused741]": 746, - "[unused742]": 747, - "[unused743]": 748, - "[unused744]": 749, - "[unused745]": 750, - "[unused746]": 751, - "[unused747]": 752, - "[unused748]": 753, - "[unused749]": 754, - "[unused750]": 755, - "[unused751]": 756, - "[unused752]": 757, - "[unused753]": 758, - "[unused754]": 759, - "[unused755]": 760, - "[unused756]": 761, - "[unused757]": 762, - "[unused758]": 763, - "[unused759]": 764, - "[unused760]": 765, - "[unused761]": 766, - "[unused762]": 767, - "[unused763]": 768, - "[unused764]": 769, - "[unused765]": 770, - "[unused766]": 771, - "[unused767]": 772, - "[unused768]": 773, - "[unused769]": 774, - "[unused770]": 775, - "[unused771]": 776, - "[unused772]": 777, - "[unused773]": 778, - "[unused774]": 779, - "[unused775]": 780, - "[unused776]": 781, - "[unused777]": 782, - "[unused778]": 783, - "[unused779]": 784, - "[unused780]": 785, - 
"[unused781]": 786, - "[unused782]": 787, - "[unused783]": 788, - "[unused784]": 789, - "[unused785]": 790, - "[unused786]": 791, - "[unused787]": 792, - "[unused788]": 793, - "[unused789]": 794, - "[unused790]": 795, - "[unused791]": 796, - "[unused792]": 797, - "[unused793]": 798, - "[unused794]": 799, - "[unused795]": 800, - "[unused796]": 801, - "[unused797]": 802, - "[unused798]": 803, - "[unused799]": 804, - "[unused800]": 805, - "[unused801]": 806, - "[unused802]": 807, - "[unused803]": 808, - "[unused804]": 809, - "[unused805]": 810, - "[unused806]": 811, - "[unused807]": 812, - "[unused808]": 813, - "[unused809]": 814, - "[unused810]": 815, - "[unused811]": 816, - "[unused812]": 817, - "[unused813]": 818, - "[unused814]": 819, - "[unused815]": 820, - "[unused816]": 821, - "[unused817]": 822, - "[unused818]": 823, - "[unused819]": 824, - "[unused820]": 825, - "[unused821]": 826, - "[unused822]": 827, - "[unused823]": 828, - "[unused824]": 829, - "[unused825]": 830, - "[unused826]": 831, - "[unused827]": 832, - "[unused828]": 833, - "[unused829]": 834, - "[unused830]": 835, - "[unused831]": 836, - "[unused832]": 837, - "[unused833]": 838, - "[unused834]": 839, - "[unused835]": 840, - "[unused836]": 841, - "[unused837]": 842, - "[unused838]": 843, - "[unused839]": 844, - "[unused840]": 845, - "[unused841]": 846, - "[unused842]": 847, - "[unused843]": 848, - "[unused844]": 849, - "[unused845]": 850, - "[unused846]": 851, - "[unused847]": 852, - "[unused848]": 853, - "[unused849]": 854, - "[unused850]": 855, - "[unused851]": 856, - "[unused852]": 857, - "[unused853]": 858, - "[unused854]": 859, - "[unused855]": 860, - "[unused856]": 861, - "[unused857]": 862, - "[unused858]": 863, - "[unused859]": 864, - "[unused860]": 865, - "[unused861]": 866, - "[unused862]": 867, - "[unused863]": 868, - "[unused864]": 869, - "[unused865]": 870, - "[unused866]": 871, - "[unused867]": 872, - "[unused868]": 873, - "[unused869]": 874, - "[unused870]": 875, - "[unused871]": 876, - "[unused872]": 877, - "[unused873]": 878, - "[unused874]": 879, - "[unused875]": 880, - "[unused876]": 881, - "[unused877]": 882, - "[unused878]": 883, - "[unused879]": 884, - "[unused880]": 885, - "[unused881]": 886, - "[unused882]": 887, - "[unused883]": 888, - "[unused884]": 889, - "[unused885]": 890, - "[unused886]": 891, - "[unused887]": 892, - "[unused888]": 893, - "[unused889]": 894, - "[unused890]": 895, - "[unused891]": 896, - "[unused892]": 897, - "[unused893]": 898, - "[unused894]": 899, - "[unused895]": 900, - "[unused896]": 901, - "[unused897]": 902, - "[unused898]": 903, - "[unused899]": 904, - "[unused900]": 905, - "[unused901]": 906, - "[unused902]": 907, - "[unused903]": 908, - "[unused904]": 909, - "[unused905]": 910, - "[unused906]": 911, - "[unused907]": 912, - "[unused908]": 913, - "[unused909]": 914, - "[unused910]": 915, - "[unused911]": 916, - "[unused912]": 917, - "[unused913]": 918, - "[unused914]": 919, - "[unused915]": 920, - "[unused916]": 921, - "[unused917]": 922, - "[unused918]": 923, - "[unused919]": 924, - "[unused920]": 925, - "[unused921]": 926, - "[unused922]": 927, - "[unused923]": 928, - "[unused924]": 929, - "[unused925]": 930, - "[unused926]": 931, - "[unused927]": 932, - "[unused928]": 933, - "[unused929]": 934, - "[unused930]": 935, - "[unused931]": 936, - "[unused932]": 937, - "[unused933]": 938, - "[unused934]": 939, - "[unused935]": 940, - "[unused936]": 941, - "[unused937]": 942, - "[unused938]": 943, - "[unused939]": 944, - "[unused940]": 945, - "[unused941]": 946, - 
"[unused942]": 947, - "[unused943]": 948, - "[unused944]": 949, - "[unused945]": 950, - "[unused946]": 951, - "[unused947]": 952, - "[unused948]": 953, - "[unused949]": 954, - "[unused950]": 955, - "[unused951]": 956, - "[unused952]": 957, - "[unused953]": 958, - "[unused954]": 959, - "[unused955]": 960, - "[unused956]": 961, - "[unused957]": 962, - "[unused958]": 963, - "[unused959]": 964, - "[unused960]": 965, - "[unused961]": 966, - "[unused962]": 967, - "[unused963]": 968, - "[unused964]": 969, - "[unused965]": 970, - "[unused966]": 971, - "[unused967]": 972, - "[unused968]": 973, - "[unused969]": 974, - "[unused970]": 975, - "[unused971]": 976, - "[unused972]": 977, - "[unused973]": 978, - "[unused974]": 979, - "[unused975]": 980, - "[unused976]": 981, - "[unused977]": 982, - "[unused978]": 983, - "[unused979]": 984, - "[unused980]": 985, - "[unused981]": 986, - "[unused982]": 987, - "[unused983]": 988, - "[unused984]": 989, - "[unused985]": 990, - "[unused986]": 991, - "[unused987]": 992, - "[unused988]": 993, - "[unused989]": 994, - "[unused990]": 995, - "[unused991]": 996, - "[unused992]": 997, - "[unused993]": 998, - "!": 999, - "\"": 1000, - "#": 1001, - "$": 1002, - "%": 1003, - "&": 1004, - "'": 1005, - "(": 1006, - ")": 1007, - "*": 1008, - "+": 1009, - ",": 1010, - "-": 1011, - ".": 1012, - "/": 1013, - "0": 1014, - "1": 1015, - "2": 1016, - "3": 1017, - "4": 1018, - "5": 1019, - "6": 1020, - "7": 1021, - "8": 1022, - "9": 1023, - ":": 1024, - ";": 1025, - "<": 1026, - "=": 1027, - ">": 1028, - "?": 1029, - "@": 1030, - "[": 1031, - "\\": 1032, - "]": 1033, - "^": 1034, - "_": 1035, - "`": 1036, - "a": 1037, - "b": 1038, - "c": 1039, - "d": 1040, - "e": 1041, - "f": 1042, - "g": 1043, - "h": 1044, - "i": 1045, - "j": 1046, - "k": 1047, - "l": 1048, - "m": 1049, - "n": 1050, - "o": 1051, - "p": 1052, - "q": 1053, - "r": 1054, - "s": 1055, - "t": 1056, - "u": 1057, - "v": 1058, - "w": 1059, - "x": 1060, - "y": 1061, - "z": 1062, - "{": 1063, - "|": 1064, - "}": 1065, - "~": 1066, - "¡": 1067, - "¢": 1068, - "£": 1069, - "¤": 1070, - "¥": 1071, - "¦": 1072, - "§": 1073, - "¨": 1074, - "©": 1075, - "ª": 1076, - "«": 1077, - "¬": 1078, - "®": 1079, - "°": 1080, - "±": 1081, - "²": 1082, - "³": 1083, - "´": 1084, - "µ": 1085, - "¶": 1086, - "·": 1087, - "¹": 1088, - "º": 1089, - "»": 1090, - "¼": 1091, - "½": 1092, - "¾": 1093, - "¿": 1094, - "×": 1095, - "ß": 1096, - "æ": 1097, - "ð": 1098, - "÷": 1099, - "ø": 1100, - "þ": 1101, - "đ": 1102, - "ħ": 1103, - "ı": 1104, - "ł": 1105, - "ŋ": 1106, - "œ": 1107, - "ƒ": 1108, - "ɐ": 1109, - "ɑ": 1110, - "ɒ": 1111, - "ɔ": 1112, - "ɕ": 1113, - "ə": 1114, - "ɛ": 1115, - "ɡ": 1116, - "ɣ": 1117, - "ɨ": 1118, - "ɪ": 1119, - "ɫ": 1120, - "ɬ": 1121, - "ɯ": 1122, - "ɲ": 1123, - "ɴ": 1124, - "ɹ": 1125, - "ɾ": 1126, - "ʀ": 1127, - "ʁ": 1128, - "ʂ": 1129, - "ʃ": 1130, - "ʉ": 1131, - "ʊ": 1132, - "ʋ": 1133, - "ʌ": 1134, - "ʎ": 1135, - "ʐ": 1136, - "ʑ": 1137, - "ʒ": 1138, - "ʔ": 1139, - "ʰ": 1140, - "ʲ": 1141, - "ʳ": 1142, - "ʷ": 1143, - "ʸ": 1144, - "ʻ": 1145, - "ʼ": 1146, - "ʾ": 1147, - "ʿ": 1148, - "ˈ": 1149, - "ː": 1150, - "ˡ": 1151, - "ˢ": 1152, - "ˣ": 1153, - "ˤ": 1154, - "α": 1155, - "β": 1156, - "γ": 1157, - "δ": 1158, - "ε": 1159, - "ζ": 1160, - "η": 1161, - "θ": 1162, - "ι": 1163, - "κ": 1164, - "λ": 1165, - "μ": 1166, - "ν": 1167, - "ξ": 1168, - "ο": 1169, - "π": 1170, - "ρ": 1171, - "ς": 1172, - "σ": 1173, - "τ": 1174, - "υ": 1175, - "φ": 1176, - "χ": 1177, - "ψ": 1178, - "ω": 1179, - "а": 1180, - "б": 1181, - "в": 1182, - "г": 1183, - "д": 
1184, - "е": 1185, - "ж": 1186, - "з": 1187, - "и": 1188, - "к": 1189, - "л": 1190, - "м": 1191, - "н": 1192, - "о": 1193, - "п": 1194, - "р": 1195, - "с": 1196, - "т": 1197, - "у": 1198, - "ф": 1199, - "х": 1200, - "ц": 1201, - "ч": 1202, - "ш": 1203, - "щ": 1204, - "ъ": 1205, - "ы": 1206, - "ь": 1207, - "э": 1208, - "ю": 1209, - "я": 1210, - "ђ": 1211, - "є": 1212, - "і": 1213, - "ј": 1214, - "љ": 1215, - "њ": 1216, - "ћ": 1217, - "ӏ": 1218, - "ա": 1219, - "բ": 1220, - "գ": 1221, - "դ": 1222, - "ե": 1223, - "թ": 1224, - "ի": 1225, - "լ": 1226, - "կ": 1227, - "հ": 1228, - "մ": 1229, - "յ": 1230, - "ն": 1231, - "ո": 1232, - "պ": 1233, - "ս": 1234, - "վ": 1235, - "տ": 1236, - "ր": 1237, - "ւ": 1238, - "ք": 1239, - "־": 1240, - "א": 1241, - "ב": 1242, - "ג": 1243, - "ד": 1244, - "ה": 1245, - "ו": 1246, - "ז": 1247, - "ח": 1248, - "ט": 1249, - "י": 1250, - "ך": 1251, - "כ": 1252, - "ל": 1253, - "ם": 1254, - "מ": 1255, - "ן": 1256, - "נ": 1257, - "ס": 1258, - "ע": 1259, - "ף": 1260, - "פ": 1261, - "ץ": 1262, - "צ": 1263, - "ק": 1264, - "ר": 1265, - "ש": 1266, - "ת": 1267, - "،": 1268, - "ء": 1269, - "ا": 1270, - "ب": 1271, - "ة": 1272, - "ت": 1273, - "ث": 1274, - "ج": 1275, - "ح": 1276, - "خ": 1277, - "د": 1278, - "ذ": 1279, - "ر": 1280, - "ز": 1281, - "س": 1282, - "ش": 1283, - "ص": 1284, - "ض": 1285, - "ط": 1286, - "ظ": 1287, - "ع": 1288, - "غ": 1289, - "ـ": 1290, - "ف": 1291, - "ق": 1292, - "ك": 1293, - "ل": 1294, - "م": 1295, - "ن": 1296, - "ه": 1297, - "و": 1298, - "ى": 1299, - "ي": 1300, - "ٹ": 1301, - "پ": 1302, - "چ": 1303, - "ک": 1304, - "گ": 1305, - "ں": 1306, - "ھ": 1307, - "ہ": 1308, - "ی": 1309, - "ے": 1310, - "अ": 1311, - "आ": 1312, - "उ": 1313, - "ए": 1314, - "क": 1315, - "ख": 1316, - "ग": 1317, - "च": 1318, - "ज": 1319, - "ट": 1320, - "ड": 1321, - "ण": 1322, - "त": 1323, - "थ": 1324, - "द": 1325, - "ध": 1326, - "न": 1327, - "प": 1328, - "ब": 1329, - "भ": 1330, - "म": 1331, - "य": 1332, - "र": 1333, - "ल": 1334, - "व": 1335, - "श": 1336, - "ष": 1337, - "स": 1338, - "ह": 1339, - "ा": 1340, - "ि": 1341, - "ी": 1342, - "ो": 1343, - "।": 1344, - "॥": 1345, - "ং": 1346, - "অ": 1347, - "আ": 1348, - "ই": 1349, - "উ": 1350, - "এ": 1351, - "ও": 1352, - "ক": 1353, - "খ": 1354, - "গ": 1355, - "চ": 1356, - "ছ": 1357, - "জ": 1358, - "ট": 1359, - "ড": 1360, - "ণ": 1361, - "ত": 1362, - "থ": 1363, - "দ": 1364, - "ধ": 1365, - "ন": 1366, - "প": 1367, - "ব": 1368, - "ভ": 1369, - "ম": 1370, - "য": 1371, - "র": 1372, - "ল": 1373, - "শ": 1374, - "ষ": 1375, - "স": 1376, - "হ": 1377, - "া": 1378, - "ি": 1379, - "ী": 1380, - "ে": 1381, - "க": 1382, - "ச": 1383, - "ட": 1384, - "த": 1385, - "ந": 1386, - "ன": 1387, - "ப": 1388, - "ம": 1389, - "ய": 1390, - "ர": 1391, - "ல": 1392, - "ள": 1393, - "வ": 1394, - "ா": 1395, - "ி": 1396, - "ு": 1397, - "ே": 1398, - "ை": 1399, - "ನ": 1400, - "ರ": 1401, - "ಾ": 1402, - "ක": 1403, - "ය": 1404, - "ර": 1405, - "ල": 1406, - "ව": 1407, - "ා": 1408, - "ก": 1409, - "ง": 1410, - "ต": 1411, - "ท": 1412, - "น": 1413, - "พ": 1414, - "ม": 1415, - "ย": 1416, - "ร": 1417, - "ล": 1418, - "ว": 1419, - "ส": 1420, - "อ": 1421, - "า": 1422, - "เ": 1423, - "་": 1424, - "།": 1425, - "ག": 1426, - "ང": 1427, - "ད": 1428, - "ན": 1429, - "པ": 1430, - "བ": 1431, - "མ": 1432, - "འ": 1433, - "ར": 1434, - "ལ": 1435, - "ས": 1436, - "မ": 1437, - "ა": 1438, - "ბ": 1439, - "გ": 1440, - "დ": 1441, - "ე": 1442, - "ვ": 1443, - "თ": 1444, - "ი": 1445, - "კ": 1446, - "ლ": 1447, - "მ": 1448, - "ნ": 1449, - "ო": 1450, - "რ": 1451, - "ს": 1452, - "ტ": 1453, - "უ": 1454, - "ᄀ": 1455, - "ᄂ": 1456, - "ᄃ": 1457, 
- "ᄅ": 1458, - "ᄆ": 1459, - "ᄇ": 1460, - "ᄉ": 1461, - "ᄊ": 1462, - "ᄋ": 1463, - "ᄌ": 1464, - "ᄎ": 1465, - "ᄏ": 1466, - "ᄐ": 1467, - "ᄑ": 1468, - "ᄒ": 1469, - "ᅡ": 1470, - "ᅢ": 1471, - "ᅥ": 1472, - "ᅦ": 1473, - "ᅧ": 1474, - "ᅩ": 1475, - "ᅪ": 1476, - "ᅭ": 1477, - "ᅮ": 1478, - "ᅯ": 1479, - "ᅲ": 1480, - "ᅳ": 1481, - "ᅴ": 1482, - "ᅵ": 1483, - "ᆨ": 1484, - "ᆫ": 1485, - "ᆯ": 1486, - "ᆷ": 1487, - "ᆸ": 1488, - "ᆼ": 1489, - "ᴬ": 1490, - "ᴮ": 1491, - "ᴰ": 1492, - "ᴵ": 1493, - "ᴺ": 1494, - "ᵀ": 1495, - "ᵃ": 1496, - "ᵇ": 1497, - "ᵈ": 1498, - "ᵉ": 1499, - "ᵍ": 1500, - "ᵏ": 1501, - "ᵐ": 1502, - "ᵒ": 1503, - "ᵖ": 1504, - "ᵗ": 1505, - "ᵘ": 1506, - "ᵢ": 1507, - "ᵣ": 1508, - "ᵤ": 1509, - "ᵥ": 1510, - "ᶜ": 1511, - "ᶠ": 1512, - "‐": 1513, - "‑": 1514, - "‒": 1515, - "–": 1516, - "—": 1517, - "―": 1518, - "‖": 1519, - "‘": 1520, - "’": 1521, - "‚": 1522, - "“": 1523, - "”": 1524, - "„": 1525, - "†": 1526, - "‡": 1527, - "•": 1528, - "…": 1529, - "‰": 1530, - "′": 1531, - "″": 1532, - "›": 1533, - "‿": 1534, - "⁄": 1535, - "⁰": 1536, - "ⁱ": 1537, - "⁴": 1538, - "⁵": 1539, - "⁶": 1540, - "⁷": 1541, - "⁸": 1542, - "⁹": 1543, - "⁺": 1544, - "⁻": 1545, - "ⁿ": 1546, - "₀": 1547, - "₁": 1548, - "₂": 1549, - "₃": 1550, - "₄": 1551, - "₅": 1552, - "₆": 1553, - "₇": 1554, - "₈": 1555, - "₉": 1556, - "₊": 1557, - "₍": 1558, - "₎": 1559, - "ₐ": 1560, - "ₑ": 1561, - "ₒ": 1562, - "ₓ": 1563, - "ₕ": 1564, - "ₖ": 1565, - "ₗ": 1566, - "ₘ": 1567, - "ₙ": 1568, - "ₚ": 1569, - "ₛ": 1570, - "ₜ": 1571, - "₤": 1572, - "₩": 1573, - "€": 1574, - "₱": 1575, - "₹": 1576, - "ℓ": 1577, - "№": 1578, - "ℝ": 1579, - "™": 1580, - "⅓": 1581, - "⅔": 1582, - "←": 1583, - "↑": 1584, - "→": 1585, - "↓": 1586, - "↔": 1587, - "↦": 1588, - "⇄": 1589, - "⇌": 1590, - "⇒": 1591, - "∂": 1592, - "∅": 1593, - "∆": 1594, - "∇": 1595, - "∈": 1596, - "−": 1597, - "∗": 1598, - "∘": 1599, - "√": 1600, - "∞": 1601, - "∧": 1602, - "∨": 1603, - "∩": 1604, - "∪": 1605, - "≈": 1606, - "≡": 1607, - "≤": 1608, - "≥": 1609, - "⊂": 1610, - "⊆": 1611, - "⊕": 1612, - "⊗": 1613, - "⋅": 1614, - "─": 1615, - "│": 1616, - "■": 1617, - "▪": 1618, - "●": 1619, - "★": 1620, - "☆": 1621, - "☉": 1622, - "♠": 1623, - "♣": 1624, - "♥": 1625, - "♦": 1626, - "♭": 1627, - "♯": 1628, - "⟨": 1629, - "⟩": 1630, - "ⱼ": 1631, - "⺩": 1632, - "⺼": 1633, - "⽥": 1634, - "、": 1635, - "。": 1636, - "〈": 1637, - "〉": 1638, - "《": 1639, - "》": 1640, - "「": 1641, - "」": 1642, - "『": 1643, - "』": 1644, - "〜": 1645, - "あ": 1646, - "い": 1647, - "う": 1648, - "え": 1649, - "お": 1650, - "か": 1651, - "き": 1652, - "く": 1653, - "け": 1654, - "こ": 1655, - "さ": 1656, - "し": 1657, - "す": 1658, - "せ": 1659, - "そ": 1660, - "た": 1661, - "ち": 1662, - "っ": 1663, - "つ": 1664, - "て": 1665, - "と": 1666, - "な": 1667, - "に": 1668, - "ぬ": 1669, - "ね": 1670, - "の": 1671, - "は": 1672, - "ひ": 1673, - "ふ": 1674, - "へ": 1675, - "ほ": 1676, - "ま": 1677, - "み": 1678, - "む": 1679, - "め": 1680, - "も": 1681, - "や": 1682, - "ゆ": 1683, - "よ": 1684, - "ら": 1685, - "り": 1686, - "る": 1687, - "れ": 1688, - "ろ": 1689, - "を": 1690, - "ん": 1691, - "ァ": 1692, - "ア": 1693, - "ィ": 1694, - "イ": 1695, - "ウ": 1696, - "ェ": 1697, - "エ": 1698, - "オ": 1699, - "カ": 1700, - "キ": 1701, - "ク": 1702, - "ケ": 1703, - "コ": 1704, - "サ": 1705, - "シ": 1706, - "ス": 1707, - "セ": 1708, - "タ": 1709, - "チ": 1710, - "ッ": 1711, - "ツ": 1712, - "テ": 1713, - "ト": 1714, - "ナ": 1715, - "ニ": 1716, - "ノ": 1717, - "ハ": 1718, - "ヒ": 1719, - "フ": 1720, - "ヘ": 1721, - "ホ": 1722, - "マ": 1723, - "ミ": 1724, - "ム": 1725, - "メ": 1726, - "モ": 1727, - "ャ": 1728, - "ュ": 1729, - "ョ": 1730, - 
"ラ": 1731, - "リ": 1732, - "ル": 1733, - "レ": 1734, - "ロ": 1735, - "ワ": 1736, - "ン": 1737, - "・": 1738, - "ー": 1739, - "一": 1740, - "三": 1741, - "上": 1742, - "下": 1743, - "不": 1744, - "世": 1745, - "中": 1746, - "主": 1747, - "久": 1748, - "之": 1749, - "也": 1750, - "事": 1751, - "二": 1752, - "五": 1753, - "井": 1754, - "京": 1755, - "人": 1756, - "亻": 1757, - "仁": 1758, - "介": 1759, - "代": 1760, - "仮": 1761, - "伊": 1762, - "会": 1763, - "佐": 1764, - "侍": 1765, - "保": 1766, - "信": 1767, - "健": 1768, - "元": 1769, - "光": 1770, - "八": 1771, - "公": 1772, - "内": 1773, - "出": 1774, - "分": 1775, - "前": 1776, - "劉": 1777, - "力": 1778, - "加": 1779, - "勝": 1780, - "北": 1781, - "区": 1782, - "十": 1783, - "千": 1784, - "南": 1785, - "博": 1786, - "原": 1787, - "口": 1788, - "古": 1789, - "史": 1790, - "司": 1791, - "合": 1792, - "吉": 1793, - "同": 1794, - "名": 1795, - "和": 1796, - "囗": 1797, - "四": 1798, - "国": 1799, - "國": 1800, - "土": 1801, - "地": 1802, - "坂": 1803, - "城": 1804, - "堂": 1805, - "場": 1806, - "士": 1807, - "夏": 1808, - "外": 1809, - "大": 1810, - "天": 1811, - "太": 1812, - "夫": 1813, - "奈": 1814, - "女": 1815, - "子": 1816, - "学": 1817, - "宀": 1818, - "宇": 1819, - "安": 1820, - "宗": 1821, - "定": 1822, - "宣": 1823, - "宮": 1824, - "家": 1825, - "宿": 1826, - "寺": 1827, - "將": 1828, - "小": 1829, - "尚": 1830, - "山": 1831, - "岡": 1832, - "島": 1833, - "崎": 1834, - "川": 1835, - "州": 1836, - "巿": 1837, - "帝": 1838, - "平": 1839, - "年": 1840, - "幸": 1841, - "广": 1842, - "弘": 1843, - "張": 1844, - "彳": 1845, - "後": 1846, - "御": 1847, - "德": 1848, - "心": 1849, - "忄": 1850, - "志": 1851, - "忠": 1852, - "愛": 1853, - "成": 1854, - "我": 1855, - "戦": 1856, - "戸": 1857, - "手": 1858, - "扌": 1859, - "政": 1860, - "文": 1861, - "新": 1862, - "方": 1863, - "日": 1864, - "明": 1865, - "星": 1866, - "春": 1867, - "昭": 1868, - "智": 1869, - "曲": 1870, - "書": 1871, - "月": 1872, - "有": 1873, - "朝": 1874, - "木": 1875, - "本": 1876, - "李": 1877, - "村": 1878, - "東": 1879, - "松": 1880, - "林": 1881, - "森": 1882, - "楊": 1883, - "樹": 1884, - "橋": 1885, - "歌": 1886, - "止": 1887, - "正": 1888, - "武": 1889, - "比": 1890, - "氏": 1891, - "民": 1892, - "水": 1893, - "氵": 1894, - "氷": 1895, - "永": 1896, - "江": 1897, - "沢": 1898, - "河": 1899, - "治": 1900, - "法": 1901, - "海": 1902, - "清": 1903, - "漢": 1904, - "瀬": 1905, - "火": 1906, - "版": 1907, - "犬": 1908, - "王": 1909, - "生": 1910, - "田": 1911, - "男": 1912, - "疒": 1913, - "発": 1914, - "白": 1915, - "的": 1916, - "皇": 1917, - "目": 1918, - "相": 1919, - "省": 1920, - "真": 1921, - "石": 1922, - "示": 1923, - "社": 1924, - "神": 1925, - "福": 1926, - "禾": 1927, - "秀": 1928, - "秋": 1929, - "空": 1930, - "立": 1931, - "章": 1932, - "竹": 1933, - "糹": 1934, - "美": 1935, - "義": 1936, - "耳": 1937, - "良": 1938, - "艹": 1939, - "花": 1940, - "英": 1941, - "華": 1942, - "葉": 1943, - "藤": 1944, - "行": 1945, - "街": 1946, - "西": 1947, - "見": 1948, - "訁": 1949, - "語": 1950, - "谷": 1951, - "貝": 1952, - "貴": 1953, - "車": 1954, - "軍": 1955, - "辶": 1956, - "道": 1957, - "郎": 1958, - "郡": 1959, - "部": 1960, - "都": 1961, - "里": 1962, - "野": 1963, - "金": 1964, - "鈴": 1965, - "镇": 1966, - "長": 1967, - "門": 1968, - "間": 1969, - "阝": 1970, - "阿": 1971, - "陳": 1972, - "陽": 1973, - "雄": 1974, - "青": 1975, - "面": 1976, - "風": 1977, - "食": 1978, - "香": 1979, - "馬": 1980, - "高": 1981, - "龍": 1982, - "龸": 1983, - "fi": 1984, - "fl": 1985, - "!": 1986, - "(": 1987, - ")": 1988, - ",": 1989, - "-": 1990, - ".": 1991, - "/": 1992, - ":": 1993, - "?": 1994, - "~": 1995, - "the": 1996, - "of": 1997, - "and": 1998, - "in": 1999, - "to": 2000, - "was": 2001, - "he": 2002, - "is": 
2003, - "as": 2004, - "for": 2005, - "on": 2006, - "with": 2007, - "that": 2008, - "it": 2009, - "his": 2010, - "by": 2011, - "at": 2012, - "from": 2013, - "her": 2014, - "##s": 2015, - "she": 2016, - "you": 2017, - "had": 2018, - "an": 2019, - "were": 2020, - "but": 2021, - "be": 2022, - "this": 2023, - "are": 2024, - "not": 2025, - "my": 2026, - "they": 2027, - "one": 2028, - "which": 2029, - "or": 2030, - "have": 2031, - "him": 2032, - "me": 2033, - "first": 2034, - "all": 2035, - "also": 2036, - "their": 2037, - "has": 2038, - "up": 2039, - "who": 2040, - "out": 2041, - "been": 2042, - "when": 2043, - "after": 2044, - "there": 2045, - "into": 2046, - "new": 2047, - "two": 2048, - "its": 2049, - "##a": 2050, - "time": 2051, - "would": 2052, - "no": 2053, - "what": 2054, - "about": 2055, - "said": 2056, - "we": 2057, - "over": 2058, - "then": 2059, - "other": 2060, - "so": 2061, - "more": 2062, - "##e": 2063, - "can": 2064, - "if": 2065, - "like": 2066, - "back": 2067, - "them": 2068, - "only": 2069, - "some": 2070, - "could": 2071, - "##i": 2072, - "where": 2073, - "just": 2074, - "##ing": 2075, - "during": 2076, - "before": 2077, - "##n": 2078, - "do": 2079, - "##o": 2080, - "made": 2081, - "school": 2082, - "through": 2083, - "than": 2084, - "now": 2085, - "years": 2086, - "most": 2087, - "world": 2088, - "may": 2089, - "between": 2090, - "down": 2091, - "well": 2092, - "three": 2093, - "##d": 2094, - "year": 2095, - "while": 2096, - "will": 2097, - "##ed": 2098, - "##r": 2099, - "##y": 2100, - "later": 2101, - "##t": 2102, - "city": 2103, - "under": 2104, - "around": 2105, - "did": 2106, - "such": 2107, - "being": 2108, - "used": 2109, - "state": 2110, - "people": 2111, - "part": 2112, - "know": 2113, - "against": 2114, - "your": 2115, - "many": 2116, - "second": 2117, - "university": 2118, - "both": 2119, - "national": 2120, - "##er": 2121, - "these": 2122, - "don": 2123, - "known": 2124, - "off": 2125, - "way": 2126, - "until": 2127, - "re": 2128, - "how": 2129, - "even": 2130, - "get": 2131, - "head": 2132, - "...": 2133, - "didn": 2134, - "##ly": 2135, - "team": 2136, - "american": 2137, - "because": 2138, - "de": 2139, - "##l": 2140, - "born": 2141, - "united": 2142, - "film": 2143, - "since": 2144, - "still": 2145, - "long": 2146, - "work": 2147, - "south": 2148, - "us": 2149, - "became": 2150, - "any": 2151, - "high": 2152, - "again": 2153, - "day": 2154, - "family": 2155, - "see": 2156, - "right": 2157, - "man": 2158, - "eyes": 2159, - "house": 2160, - "season": 2161, - "war": 2162, - "states": 2163, - "including": 2164, - "took": 2165, - "life": 2166, - "north": 2167, - "same": 2168, - "each": 2169, - "called": 2170, - "name": 2171, - "much": 2172, - "place": 2173, - "however": 2174, - "go": 2175, - "four": 2176, - "group": 2177, - "another": 2178, - "found": 2179, - "won": 2180, - "area": 2181, - "here": 2182, - "going": 2183, - "10": 2184, - "away": 2185, - "series": 2186, - "left": 2187, - "home": 2188, - "music": 2189, - "best": 2190, - "make": 2191, - "hand": 2192, - "number": 2193, - "company": 2194, - "several": 2195, - "never": 2196, - "last": 2197, - "john": 2198, - "000": 2199, - "very": 2200, - "album": 2201, - "take": 2202, - "end": 2203, - "good": 2204, - "too": 2205, - "following": 2206, - "released": 2207, - "game": 2208, - "played": 2209, - "little": 2210, - "began": 2211, - "district": 2212, - "##m": 2213, - "old": 2214, - "want": 2215, - "those": 2216, - "side": 2217, - "held": 2218, - "own": 2219, - "early": 2220, - "county": 2221, - "ll": 2222, - 
"league": 2223, - "use": 2224, - "west": 2225, - "##u": 2226, - "face": 2227, - "think": 2228, - "##es": 2229, - "2010": 2230, - "government": 2231, - "##h": 2232, - "march": 2233, - "came": 2234, - "small": 2235, - "general": 2236, - "town": 2237, - "june": 2238, - "##on": 2239, - "line": 2240, - "based": 2241, - "something": 2242, - "##k": 2243, - "september": 2244, - "thought": 2245, - "looked": 2246, - "along": 2247, - "international": 2248, - "2011": 2249, - "air": 2250, - "july": 2251, - "club": 2252, - "went": 2253, - "january": 2254, - "october": 2255, - "our": 2256, - "august": 2257, - "april": 2258, - "york": 2259, - "12": 2260, - "few": 2261, - "2012": 2262, - "2008": 2263, - "east": 2264, - "show": 2265, - "member": 2266, - "college": 2267, - "2009": 2268, - "father": 2269, - "public": 2270, - "##us": 2271, - "come": 2272, - "men": 2273, - "five": 2274, - "set": 2275, - "station": 2276, - "church": 2277, - "##c": 2278, - "next": 2279, - "former": 2280, - "november": 2281, - "room": 2282, - "party": 2283, - "located": 2284, - "december": 2285, - "2013": 2286, - "age": 2287, - "got": 2288, - "2007": 2289, - "##g": 2290, - "system": 2291, - "let": 2292, - "love": 2293, - "2006": 2294, - "though": 2295, - "every": 2296, - "2014": 2297, - "look": 2298, - "song": 2299, - "water": 2300, - "century": 2301, - "without": 2302, - "body": 2303, - "black": 2304, - "night": 2305, - "within": 2306, - "great": 2307, - "women": 2308, - "single": 2309, - "ve": 2310, - "building": 2311, - "large": 2312, - "population": 2313, - "river": 2314, - "named": 2315, - "band": 2316, - "white": 2317, - "started": 2318, - "##an": 2319, - "once": 2320, - "15": 2321, - "20": 2322, - "should": 2323, - "18": 2324, - "2015": 2325, - "service": 2326, - "top": 2327, - "built": 2328, - "british": 2329, - "open": 2330, - "death": 2331, - "king": 2332, - "moved": 2333, - "local": 2334, - "times": 2335, - "children": 2336, - "february": 2337, - "book": 2338, - "why": 2339, - "11": 2340, - "door": 2341, - "need": 2342, - "president": 2343, - "order": 2344, - "final": 2345, - "road": 2346, - "wasn": 2347, - "although": 2348, - "due": 2349, - "major": 2350, - "died": 2351, - "village": 2352, - "third": 2353, - "knew": 2354, - "2016": 2355, - "asked": 2356, - "turned": 2357, - "st": 2358, - "wanted": 2359, - "say": 2360, - "##p": 2361, - "together": 2362, - "received": 2363, - "main": 2364, - "son": 2365, - "served": 2366, - "different": 2367, - "##en": 2368, - "behind": 2369, - "himself": 2370, - "felt": 2371, - "members": 2372, - "power": 2373, - "football": 2374, - "law": 2375, - "voice": 2376, - "play": 2377, - "##in": 2378, - "near": 2379, - "park": 2380, - "history": 2381, - "30": 2382, - "having": 2383, - "2005": 2384, - "16": 2385, - "##man": 2386, - "saw": 2387, - "mother": 2388, - "##al": 2389, - "army": 2390, - "point": 2391, - "front": 2392, - "help": 2393, - "english": 2394, - "street": 2395, - "art": 2396, - "late": 2397, - "hands": 2398, - "games": 2399, - "award": 2400, - "##ia": 2401, - "young": 2402, - "14": 2403, - "put": 2404, - "published": 2405, - "country": 2406, - "division": 2407, - "across": 2408, - "told": 2409, - "13": 2410, - "often": 2411, - "ever": 2412, - "french": 2413, - "london": 2414, - "center": 2415, - "six": 2416, - "red": 2417, - "2017": 2418, - "led": 2419, - "days": 2420, - "include": 2421, - "light": 2422, - "25": 2423, - "find": 2424, - "tell": 2425, - "among": 2426, - "species": 2427, - "really": 2428, - "according": 2429, - "central": 2430, - "half": 2431, - "2004": 2432, - 
"form": 2433, - "original": 2434, - "gave": 2435, - "office": 2436, - "making": 2437, - "enough": 2438, - "lost": 2439, - "full": 2440, - "opened": 2441, - "must": 2442, - "included": 2443, - "live": 2444, - "given": 2445, - "german": 2446, - "player": 2447, - "run": 2448, - "business": 2449, - "woman": 2450, - "community": 2451, - "cup": 2452, - "might": 2453, - "million": 2454, - "land": 2455, - "2000": 2456, - "court": 2457, - "development": 2458, - "17": 2459, - "short": 2460, - "round": 2461, - "ii": 2462, - "km": 2463, - "seen": 2464, - "class": 2465, - "story": 2466, - "always": 2467, - "become": 2468, - "sure": 2469, - "research": 2470, - "almost": 2471, - "director": 2472, - "council": 2473, - "la": 2474, - "##2": 2475, - "career": 2476, - "things": 2477, - "using": 2478, - "island": 2479, - "##z": 2480, - "couldn": 2481, - "car": 2482, - "##is": 2483, - "24": 2484, - "close": 2485, - "force": 2486, - "##1": 2487, - "better": 2488, - "free": 2489, - "support": 2490, - "control": 2491, - "field": 2492, - "students": 2493, - "2003": 2494, - "education": 2495, - "married": 2496, - "##b": 2497, - "nothing": 2498, - "worked": 2499, - "others": 2500, - "record": 2501, - "big": 2502, - "inside": 2503, - "level": 2504, - "anything": 2505, - "continued": 2506, - "give": 2507, - "james": 2508, - "##3": 2509, - "military": 2510, - "established": 2511, - "non": 2512, - "returned": 2513, - "feel": 2514, - "does": 2515, - "title": 2516, - "written": 2517, - "thing": 2518, - "feet": 2519, - "william": 2520, - "far": 2521, - "co": 2522, - "association": 2523, - "hard": 2524, - "already": 2525, - "2002": 2526, - "##ra": 2527, - "championship": 2528, - "human": 2529, - "western": 2530, - "100": 2531, - "##na": 2532, - "department": 2533, - "hall": 2534, - "role": 2535, - "various": 2536, - "production": 2537, - "21": 2538, - "19": 2539, - "heart": 2540, - "2001": 2541, - "living": 2542, - "fire": 2543, - "version": 2544, - "##ers": 2545, - "##f": 2546, - "television": 2547, - "royal": 2548, - "##4": 2549, - "produced": 2550, - "working": 2551, - "act": 2552, - "case": 2553, - "society": 2554, - "region": 2555, - "present": 2556, - "radio": 2557, - "period": 2558, - "looking": 2559, - "least": 2560, - "total": 2561, - "keep": 2562, - "england": 2563, - "wife": 2564, - "program": 2565, - "per": 2566, - "brother": 2567, - "mind": 2568, - "special": 2569, - "22": 2570, - "##le": 2571, - "am": 2572, - "works": 2573, - "soon": 2574, - "##6": 2575, - "political": 2576, - "george": 2577, - "services": 2578, - "taken": 2579, - "created": 2580, - "##7": 2581, - "further": 2582, - "able": 2583, - "reached": 2584, - "david": 2585, - "union": 2586, - "joined": 2587, - "upon": 2588, - "done": 2589, - "important": 2590, - "social": 2591, - "information": 2592, - "either": 2593, - "##ic": 2594, - "##x": 2595, - "appeared": 2596, - "position": 2597, - "ground": 2598, - "lead": 2599, - "rock": 2600, - "dark": 2601, - "election": 2602, - "23": 2603, - "board": 2604, - "france": 2605, - "hair": 2606, - "course": 2607, - "arms": 2608, - "site": 2609, - "police": 2610, - "girl": 2611, - "instead": 2612, - "real": 2613, - "sound": 2614, - "##v": 2615, - "words": 2616, - "moment": 2617, - "##te": 2618, - "someone": 2619, - "##8": 2620, - "summer": 2621, - "project": 2622, - "announced": 2623, - "san": 2624, - "less": 2625, - "wrote": 2626, - "past": 2627, - "followed": 2628, - "##5": 2629, - "blue": 2630, - "founded": 2631, - "al": 2632, - "finally": 2633, - "india": 2634, - "taking": 2635, - "records": 2636, - "america": 
2637, - "##ne": 2638, - "1999": 2639, - "design": 2640, - "considered": 2641, - "northern": 2642, - "god": 2643, - "stop": 2644, - "battle": 2645, - "toward": 2646, - "european": 2647, - "outside": 2648, - "described": 2649, - "track": 2650, - "today": 2651, - "playing": 2652, - "language": 2653, - "28": 2654, - "call": 2655, - "26": 2656, - "heard": 2657, - "professional": 2658, - "low": 2659, - "australia": 2660, - "miles": 2661, - "california": 2662, - "win": 2663, - "yet": 2664, - "green": 2665, - "##ie": 2666, - "trying": 2667, - "blood": 2668, - "##ton": 2669, - "southern": 2670, - "science": 2671, - "maybe": 2672, - "everything": 2673, - "match": 2674, - "square": 2675, - "27": 2676, - "mouth": 2677, - "video": 2678, - "race": 2679, - "recorded": 2680, - "leave": 2681, - "above": 2682, - "##9": 2683, - "daughter": 2684, - "points": 2685, - "space": 2686, - "1998": 2687, - "museum": 2688, - "change": 2689, - "middle": 2690, - "common": 2691, - "##0": 2692, - "move": 2693, - "tv": 2694, - "post": 2695, - "##ta": 2696, - "lake": 2697, - "seven": 2698, - "tried": 2699, - "elected": 2700, - "closed": 2701, - "ten": 2702, - "paul": 2703, - "minister": 2704, - "##th": 2705, - "months": 2706, - "start": 2707, - "chief": 2708, - "return": 2709, - "canada": 2710, - "person": 2711, - "sea": 2712, - "release": 2713, - "similar": 2714, - "modern": 2715, - "brought": 2716, - "rest": 2717, - "hit": 2718, - "formed": 2719, - "mr": 2720, - "##la": 2721, - "1997": 2722, - "floor": 2723, - "event": 2724, - "doing": 2725, - "thomas": 2726, - "1996": 2727, - "robert": 2728, - "care": 2729, - "killed": 2730, - "training": 2731, - "star": 2732, - "week": 2733, - "needed": 2734, - "turn": 2735, - "finished": 2736, - "railway": 2737, - "rather": 2738, - "news": 2739, - "health": 2740, - "sent": 2741, - "example": 2742, - "ran": 2743, - "term": 2744, - "michael": 2745, - "coming": 2746, - "currently": 2747, - "yes": 2748, - "forces": 2749, - "despite": 2750, - "gold": 2751, - "areas": 2752, - "50": 2753, - "stage": 2754, - "fact": 2755, - "29": 2756, - "dead": 2757, - "says": 2758, - "popular": 2759, - "2018": 2760, - "originally": 2761, - "germany": 2762, - "probably": 2763, - "developed": 2764, - "result": 2765, - "pulled": 2766, - "friend": 2767, - "stood": 2768, - "money": 2769, - "running": 2770, - "mi": 2771, - "signed": 2772, - "word": 2773, - "songs": 2774, - "child": 2775, - "eventually": 2776, - "met": 2777, - "tour": 2778, - "average": 2779, - "teams": 2780, - "minutes": 2781, - "festival": 2782, - "current": 2783, - "deep": 2784, - "kind": 2785, - "1995": 2786, - "decided": 2787, - "usually": 2788, - "eastern": 2789, - "seemed": 2790, - "##ness": 2791, - "episode": 2792, - "bed": 2793, - "added": 2794, - "table": 2795, - "indian": 2796, - "private": 2797, - "charles": 2798, - "route": 2799, - "available": 2800, - "idea": 2801, - "throughout": 2802, - "centre": 2803, - "addition": 2804, - "appointed": 2805, - "style": 2806, - "1994": 2807, - "books": 2808, - "eight": 2809, - "construction": 2810, - "press": 2811, - "mean": 2812, - "wall": 2813, - "friends": 2814, - "remained": 2815, - "schools": 2816, - "study": 2817, - "##ch": 2818, - "##um": 2819, - "institute": 2820, - "oh": 2821, - "chinese": 2822, - "sometimes": 2823, - "events": 2824, - "possible": 2825, - "1992": 2826, - "australian": 2827, - "type": 2828, - "brown": 2829, - "forward": 2830, - "talk": 2831, - "process": 2832, - "food": 2833, - "debut": 2834, - "seat": 2835, - "performance": 2836, - "committee": 2837, - "features": 2838, - 
"character": 2839, - "arts": 2840, - "herself": 2841, - "else": 2842, - "lot": 2843, - "strong": 2844, - "russian": 2845, - "range": 2846, - "hours": 2847, - "peter": 2848, - "arm": 2849, - "##da": 2850, - "morning": 2851, - "dr": 2852, - "sold": 2853, - "##ry": 2854, - "quickly": 2855, - "directed": 2856, - "1993": 2857, - "guitar": 2858, - "china": 2859, - "##w": 2860, - "31": 2861, - "list": 2862, - "##ma": 2863, - "performed": 2864, - "media": 2865, - "uk": 2866, - "players": 2867, - "smile": 2868, - "##rs": 2869, - "myself": 2870, - "40": 2871, - "placed": 2872, - "coach": 2873, - "province": 2874, - "towards": 2875, - "wouldn": 2876, - "leading": 2877, - "whole": 2878, - "boy": 2879, - "official": 2880, - "designed": 2881, - "grand": 2882, - "census": 2883, - "##el": 2884, - "europe": 2885, - "attack": 2886, - "japanese": 2887, - "henry": 2888, - "1991": 2889, - "##re": 2890, - "##os": 2891, - "cross": 2892, - "getting": 2893, - "alone": 2894, - "action": 2895, - "lower": 2896, - "network": 2897, - "wide": 2898, - "washington": 2899, - "japan": 2900, - "1990": 2901, - "hospital": 2902, - "believe": 2903, - "changed": 2904, - "sister": 2905, - "##ar": 2906, - "hold": 2907, - "gone": 2908, - "sir": 2909, - "hadn": 2910, - "ship": 2911, - "##ka": 2912, - "studies": 2913, - "academy": 2914, - "shot": 2915, - "rights": 2916, - "below": 2917, - "base": 2918, - "bad": 2919, - "involved": 2920, - "kept": 2921, - "largest": 2922, - "##ist": 2923, - "bank": 2924, - "future": 2925, - "especially": 2926, - "beginning": 2927, - "mark": 2928, - "movement": 2929, - "section": 2930, - "female": 2931, - "magazine": 2932, - "plan": 2933, - "professor": 2934, - "lord": 2935, - "longer": 2936, - "##ian": 2937, - "sat": 2938, - "walked": 2939, - "hill": 2940, - "actually": 2941, - "civil": 2942, - "energy": 2943, - "model": 2944, - "families": 2945, - "size": 2946, - "thus": 2947, - "aircraft": 2948, - "completed": 2949, - "includes": 2950, - "data": 2951, - "captain": 2952, - "##or": 2953, - "fight": 2954, - "vocals": 2955, - "featured": 2956, - "richard": 2957, - "bridge": 2958, - "fourth": 2959, - "1989": 2960, - "officer": 2961, - "stone": 2962, - "hear": 2963, - "##ism": 2964, - "means": 2965, - "medical": 2966, - "groups": 2967, - "management": 2968, - "self": 2969, - "lips": 2970, - "competition": 2971, - "entire": 2972, - "lived": 2973, - "technology": 2974, - "leaving": 2975, - "federal": 2976, - "tournament": 2977, - "bit": 2978, - "passed": 2979, - "hot": 2980, - "independent": 2981, - "awards": 2982, - "kingdom": 2983, - "mary": 2984, - "spent": 2985, - "fine": 2986, - "doesn": 2987, - "reported": 2988, - "##ling": 2989, - "jack": 2990, - "fall": 2991, - "raised": 2992, - "itself": 2993, - "stay": 2994, - "true": 2995, - "studio": 2996, - "1988": 2997, - "sports": 2998, - "replaced": 2999, - "paris": 3000, - "systems": 3001, - "saint": 3002, - "leader": 3003, - "theatre": 3004, - "whose": 3005, - "market": 3006, - "capital": 3007, - "parents": 3008, - "spanish": 3009, - "canadian": 3010, - "earth": 3011, - "##ity": 3012, - "cut": 3013, - "degree": 3014, - "writing": 3015, - "bay": 3016, - "christian": 3017, - "awarded": 3018, - "natural": 3019, - "higher": 3020, - "bill": 3021, - "##as": 3022, - "coast": 3023, - "provided": 3024, - "previous": 3025, - "senior": 3026, - "ft": 3027, - "valley": 3028, - "organization": 3029, - "stopped": 3030, - "onto": 3031, - "countries": 3032, - "parts": 3033, - "conference": 3034, - "queen": 3035, - "security": 3036, - "interest": 3037, - "saying": 3038, - 
"allowed": 3039, - "master": 3040, - "earlier": 3041, - "phone": 3042, - "matter": 3043, - "smith": 3044, - "winning": 3045, - "try": 3046, - "happened": 3047, - "moving": 3048, - "campaign": 3049, - "los": 3050, - "##ley": 3051, - "breath": 3052, - "nearly": 3053, - "mid": 3054, - "1987": 3055, - "certain": 3056, - "girls": 3057, - "date": 3058, - "italian": 3059, - "african": 3060, - "standing": 3061, - "fell": 3062, - "artist": 3063, - "##ted": 3064, - "shows": 3065, - "deal": 3066, - "mine": 3067, - "industry": 3068, - "1986": 3069, - "##ng": 3070, - "everyone": 3071, - "republic": 3072, - "provide": 3073, - "collection": 3074, - "library": 3075, - "student": 3076, - "##ville": 3077, - "primary": 3078, - "owned": 3079, - "older": 3080, - "via": 3081, - "heavy": 3082, - "1st": 3083, - "makes": 3084, - "##able": 3085, - "attention": 3086, - "anyone": 3087, - "africa": 3088, - "##ri": 3089, - "stated": 3090, - "length": 3091, - "ended": 3092, - "fingers": 3093, - "command": 3094, - "staff": 3095, - "skin": 3096, - "foreign": 3097, - "opening": 3098, - "governor": 3099, - "okay": 3100, - "medal": 3101, - "kill": 3102, - "sun": 3103, - "cover": 3104, - "job": 3105, - "1985": 3106, - "introduced": 3107, - "chest": 3108, - "hell": 3109, - "feeling": 3110, - "##ies": 3111, - "success": 3112, - "meet": 3113, - "reason": 3114, - "standard": 3115, - "meeting": 3116, - "novel": 3117, - "1984": 3118, - "trade": 3119, - "source": 3120, - "buildings": 3121, - "##land": 3122, - "rose": 3123, - "guy": 3124, - "goal": 3125, - "##ur": 3126, - "chapter": 3127, - "native": 3128, - "husband": 3129, - "previously": 3130, - "unit": 3131, - "limited": 3132, - "entered": 3133, - "weeks": 3134, - "producer": 3135, - "operations": 3136, - "mountain": 3137, - "takes": 3138, - "covered": 3139, - "forced": 3140, - "related": 3141, - "roman": 3142, - "complete": 3143, - "successful": 3144, - "key": 3145, - "texas": 3146, - "cold": 3147, - "##ya": 3148, - "channel": 3149, - "1980": 3150, - "traditional": 3151, - "films": 3152, - "dance": 3153, - "clear": 3154, - "approximately": 3155, - "500": 3156, - "nine": 3157, - "van": 3158, - "prince": 3159, - "question": 3160, - "active": 3161, - "tracks": 3162, - "ireland": 3163, - "regional": 3164, - "silver": 3165, - "author": 3166, - "personal": 3167, - "sense": 3168, - "operation": 3169, - "##ine": 3170, - "economic": 3171, - "1983": 3172, - "holding": 3173, - "twenty": 3174, - "isbn": 3175, - "additional": 3176, - "speed": 3177, - "hour": 3178, - "edition": 3179, - "regular": 3180, - "historic": 3181, - "places": 3182, - "whom": 3183, - "shook": 3184, - "movie": 3185, - "km²": 3186, - "secretary": 3187, - "prior": 3188, - "report": 3189, - "chicago": 3190, - "read": 3191, - "foundation": 3192, - "view": 3193, - "engine": 3194, - "scored": 3195, - "1982": 3196, - "units": 3197, - "ask": 3198, - "airport": 3199, - "property": 3200, - "ready": 3201, - "immediately": 3202, - "lady": 3203, - "month": 3204, - "listed": 3205, - "contract": 3206, - "##de": 3207, - "manager": 3208, - "themselves": 3209, - "lines": 3210, - "##ki": 3211, - "navy": 3212, - "writer": 3213, - "meant": 3214, - "##ts": 3215, - "runs": 3216, - "##ro": 3217, - "practice": 3218, - "championships": 3219, - "singer": 3220, - "glass": 3221, - "commission": 3222, - "required": 3223, - "forest": 3224, - "starting": 3225, - "culture": 3226, - "generally": 3227, - "giving": 3228, - "access": 3229, - "attended": 3230, - "test": 3231, - "couple": 3232, - "stand": 3233, - "catholic": 3234, - "martin": 3235, - 
"caught": 3236, - "executive": 3237, - "##less": 3238, - "eye": 3239, - "##ey": 3240, - "thinking": 3241, - "chair": 3242, - "quite": 3243, - "shoulder": 3244, - "1979": 3245, - "hope": 3246, - "decision": 3247, - "plays": 3248, - "defeated": 3249, - "municipality": 3250, - "whether": 3251, - "structure": 3252, - "offered": 3253, - "slowly": 3254, - "pain": 3255, - "ice": 3256, - "direction": 3257, - "##ion": 3258, - "paper": 3259, - "mission": 3260, - "1981": 3261, - "mostly": 3262, - "200": 3263, - "noted": 3264, - "individual": 3265, - "managed": 3266, - "nature": 3267, - "lives": 3268, - "plant": 3269, - "##ha": 3270, - "helped": 3271, - "except": 3272, - "studied": 3273, - "computer": 3274, - "figure": 3275, - "relationship": 3276, - "issue": 3277, - "significant": 3278, - "loss": 3279, - "die": 3280, - "smiled": 3281, - "gun": 3282, - "ago": 3283, - "highest": 3284, - "1972": 3285, - "##am": 3286, - "male": 3287, - "bring": 3288, - "goals": 3289, - "mexico": 3290, - "problem": 3291, - "distance": 3292, - "commercial": 3293, - "completely": 3294, - "location": 3295, - "annual": 3296, - "famous": 3297, - "drive": 3298, - "1976": 3299, - "neck": 3300, - "1978": 3301, - "surface": 3302, - "caused": 3303, - "italy": 3304, - "understand": 3305, - "greek": 3306, - "highway": 3307, - "wrong": 3308, - "hotel": 3309, - "comes": 3310, - "appearance": 3311, - "joseph": 3312, - "double": 3313, - "issues": 3314, - "musical": 3315, - "companies": 3316, - "castle": 3317, - "income": 3318, - "review": 3319, - "assembly": 3320, - "bass": 3321, - "initially": 3322, - "parliament": 3323, - "artists": 3324, - "experience": 3325, - "1974": 3326, - "particular": 3327, - "walk": 3328, - "foot": 3329, - "engineering": 3330, - "talking": 3331, - "window": 3332, - "dropped": 3333, - "##ter": 3334, - "miss": 3335, - "baby": 3336, - "boys": 3337, - "break": 3338, - "1975": 3339, - "stars": 3340, - "edge": 3341, - "remember": 3342, - "policy": 3343, - "carried": 3344, - "train": 3345, - "stadium": 3346, - "bar": 3347, - "sex": 3348, - "angeles": 3349, - "evidence": 3350, - "##ge": 3351, - "becoming": 3352, - "assistant": 3353, - "soviet": 3354, - "1977": 3355, - "upper": 3356, - "step": 3357, - "wing": 3358, - "1970": 3359, - "youth": 3360, - "financial": 3361, - "reach": 3362, - "##ll": 3363, - "actor": 3364, - "numerous": 3365, - "##se": 3366, - "##st": 3367, - "nodded": 3368, - "arrived": 3369, - "##ation": 3370, - "minute": 3371, - "##nt": 3372, - "believed": 3373, - "sorry": 3374, - "complex": 3375, - "beautiful": 3376, - "victory": 3377, - "associated": 3378, - "temple": 3379, - "1968": 3380, - "1973": 3381, - "chance": 3382, - "perhaps": 3383, - "metal": 3384, - "##son": 3385, - "1945": 3386, - "bishop": 3387, - "##et": 3388, - "lee": 3389, - "launched": 3390, - "particularly": 3391, - "tree": 3392, - "le": 3393, - "retired": 3394, - "subject": 3395, - "prize": 3396, - "contains": 3397, - "yeah": 3398, - "theory": 3399, - "empire": 3400, - "##ce": 3401, - "suddenly": 3402, - "waiting": 3403, - "trust": 3404, - "recording": 3405, - "##to": 3406, - "happy": 3407, - "terms": 3408, - "camp": 3409, - "champion": 3410, - "1971": 3411, - "religious": 3412, - "pass": 3413, - "zealand": 3414, - "names": 3415, - "2nd": 3416, - "port": 3417, - "ancient": 3418, - "tom": 3419, - "corner": 3420, - "represented": 3421, - "watch": 3422, - "legal": 3423, - "anti": 3424, - "justice": 3425, - "cause": 3426, - "watched": 3427, - "brothers": 3428, - "45": 3429, - "material": 3430, - "changes": 3431, - "simply": 3432, - 
"response": 3433, - "louis": 3434, - "fast": 3435, - "##ting": 3436, - "answer": 3437, - "60": 3438, - "historical": 3439, - "1969": 3440, - "stories": 3441, - "straight": 3442, - "create": 3443, - "feature": 3444, - "increased": 3445, - "rate": 3446, - "administration": 3447, - "virginia": 3448, - "el": 3449, - "activities": 3450, - "cultural": 3451, - "overall": 3452, - "winner": 3453, - "programs": 3454, - "basketball": 3455, - "legs": 3456, - "guard": 3457, - "beyond": 3458, - "cast": 3459, - "doctor": 3460, - "mm": 3461, - "flight": 3462, - "results": 3463, - "remains": 3464, - "cost": 3465, - "effect": 3466, - "winter": 3467, - "##ble": 3468, - "larger": 3469, - "islands": 3470, - "problems": 3471, - "chairman": 3472, - "grew": 3473, - "commander": 3474, - "isn": 3475, - "1967": 3476, - "pay": 3477, - "failed": 3478, - "selected": 3479, - "hurt": 3480, - "fort": 3481, - "box": 3482, - "regiment": 3483, - "majority": 3484, - "journal": 3485, - "35": 3486, - "edward": 3487, - "plans": 3488, - "##ke": 3489, - "##ni": 3490, - "shown": 3491, - "pretty": 3492, - "irish": 3493, - "characters": 3494, - "directly": 3495, - "scene": 3496, - "likely": 3497, - "operated": 3498, - "allow": 3499, - "spring": 3500, - "##j": 3501, - "junior": 3502, - "matches": 3503, - "looks": 3504, - "mike": 3505, - "houses": 3506, - "fellow": 3507, - "##tion": 3508, - "beach": 3509, - "marriage": 3510, - "##ham": 3511, - "##ive": 3512, - "rules": 3513, - "oil": 3514, - "65": 3515, - "florida": 3516, - "expected": 3517, - "nearby": 3518, - "congress": 3519, - "sam": 3520, - "peace": 3521, - "recent": 3522, - "iii": 3523, - "wait": 3524, - "subsequently": 3525, - "cell": 3526, - "##do": 3527, - "variety": 3528, - "serving": 3529, - "agreed": 3530, - "please": 3531, - "poor": 3532, - "joe": 3533, - "pacific": 3534, - "attempt": 3535, - "wood": 3536, - "democratic": 3537, - "piece": 3538, - "prime": 3539, - "##ca": 3540, - "rural": 3541, - "mile": 3542, - "touch": 3543, - "appears": 3544, - "township": 3545, - "1964": 3546, - "1966": 3547, - "soldiers": 3548, - "##men": 3549, - "##ized": 3550, - "1965": 3551, - "pennsylvania": 3552, - "closer": 3553, - "fighting": 3554, - "claimed": 3555, - "score": 3556, - "jones": 3557, - "physical": 3558, - "editor": 3559, - "##ous": 3560, - "filled": 3561, - "genus": 3562, - "specific": 3563, - "sitting": 3564, - "super": 3565, - "mom": 3566, - "##va": 3567, - "therefore": 3568, - "supported": 3569, - "status": 3570, - "fear": 3571, - "cases": 3572, - "store": 3573, - "meaning": 3574, - "wales": 3575, - "minor": 3576, - "spain": 3577, - "tower": 3578, - "focus": 3579, - "vice": 3580, - "frank": 3581, - "follow": 3582, - "parish": 3583, - "separate": 3584, - "golden": 3585, - "horse": 3586, - "fifth": 3587, - "remaining": 3588, - "branch": 3589, - "32": 3590, - "presented": 3591, - "stared": 3592, - "##id": 3593, - "uses": 3594, - "secret": 3595, - "forms": 3596, - "##co": 3597, - "baseball": 3598, - "exactly": 3599, - "##ck": 3600, - "choice": 3601, - "note": 3602, - "discovered": 3603, - "travel": 3604, - "composed": 3605, - "truth": 3606, - "russia": 3607, - "ball": 3608, - "color": 3609, - "kiss": 3610, - "dad": 3611, - "wind": 3612, - "continue": 3613, - "ring": 3614, - "referred": 3615, - "numbers": 3616, - "digital": 3617, - "greater": 3618, - "##ns": 3619, - "metres": 3620, - "slightly": 3621, - "direct": 3622, - "increase": 3623, - "1960": 3624, - "responsible": 3625, - "crew": 3626, - "rule": 3627, - "trees": 3628, - "troops": 3629, - "##no": 3630, - "broke": 3631, - 
"goes": 3632, - "individuals": 3633, - "hundred": 3634, - "weight": 3635, - "creek": 3636, - "sleep": 3637, - "memory": 3638, - "defense": 3639, - "provides": 3640, - "ordered": 3641, - "code": 3642, - "value": 3643, - "jewish": 3644, - "windows": 3645, - "1944": 3646, - "safe": 3647, - "judge": 3648, - "whatever": 3649, - "corps": 3650, - "realized": 3651, - "growing": 3652, - "pre": 3653, - "##ga": 3654, - "cities": 3655, - "alexander": 3656, - "gaze": 3657, - "lies": 3658, - "spread": 3659, - "scott": 3660, - "letter": 3661, - "showed": 3662, - "situation": 3663, - "mayor": 3664, - "transport": 3665, - "watching": 3666, - "workers": 3667, - "extended": 3668, - "##li": 3669, - "expression": 3670, - "normal": 3671, - "##ment": 3672, - "chart": 3673, - "multiple": 3674, - "border": 3675, - "##ba": 3676, - "host": 3677, - "##ner": 3678, - "daily": 3679, - "mrs": 3680, - "walls": 3681, - "piano": 3682, - "##ko": 3683, - "heat": 3684, - "cannot": 3685, - "##ate": 3686, - "earned": 3687, - "products": 3688, - "drama": 3689, - "era": 3690, - "authority": 3691, - "seasons": 3692, - "join": 3693, - "grade": 3694, - "##io": 3695, - "sign": 3696, - "difficult": 3697, - "machine": 3698, - "1963": 3699, - "territory": 3700, - "mainly": 3701, - "##wood": 3702, - "stations": 3703, - "squadron": 3704, - "1962": 3705, - "stepped": 3706, - "iron": 3707, - "19th": 3708, - "##led": 3709, - "serve": 3710, - "appear": 3711, - "sky": 3712, - "speak": 3713, - "broken": 3714, - "charge": 3715, - "knowledge": 3716, - "kilometres": 3717, - "removed": 3718, - "ships": 3719, - "article": 3720, - "campus": 3721, - "simple": 3722, - "##ty": 3723, - "pushed": 3724, - "britain": 3725, - "##ve": 3726, - "leaves": 3727, - "recently": 3728, - "cd": 3729, - "soft": 3730, - "boston": 3731, - "latter": 3732, - "easy": 3733, - "acquired": 3734, - "poland": 3735, - "##sa": 3736, - "quality": 3737, - "officers": 3738, - "presence": 3739, - "planned": 3740, - "nations": 3741, - "mass": 3742, - "broadcast": 3743, - "jean": 3744, - "share": 3745, - "image": 3746, - "influence": 3747, - "wild": 3748, - "offer": 3749, - "emperor": 3750, - "electric": 3751, - "reading": 3752, - "headed": 3753, - "ability": 3754, - "promoted": 3755, - "yellow": 3756, - "ministry": 3757, - "1942": 3758, - "throat": 3759, - "smaller": 3760, - "politician": 3761, - "##by": 3762, - "latin": 3763, - "spoke": 3764, - "cars": 3765, - "williams": 3766, - "males": 3767, - "lack": 3768, - "pop": 3769, - "80": 3770, - "##ier": 3771, - "acting": 3772, - "seeing": 3773, - "consists": 3774, - "##ti": 3775, - "estate": 3776, - "1961": 3777, - "pressure": 3778, - "johnson": 3779, - "newspaper": 3780, - "jr": 3781, - "chris": 3782, - "olympics": 3783, - "online": 3784, - "conditions": 3785, - "beat": 3786, - "elements": 3787, - "walking": 3788, - "vote": 3789, - "##field": 3790, - "needs": 3791, - "carolina": 3792, - "text": 3793, - "featuring": 3794, - "global": 3795, - "block": 3796, - "shirt": 3797, - "levels": 3798, - "francisco": 3799, - "purpose": 3800, - "females": 3801, - "et": 3802, - "dutch": 3803, - "duke": 3804, - "ahead": 3805, - "gas": 3806, - "twice": 3807, - "safety": 3808, - "serious": 3809, - "turning": 3810, - "highly": 3811, - "lieutenant": 3812, - "firm": 3813, - "maria": 3814, - "amount": 3815, - "mixed": 3816, - "daniel": 3817, - "proposed": 3818, - "perfect": 3819, - "agreement": 3820, - "affairs": 3821, - "3rd": 3822, - "seconds": 3823, - "contemporary": 3824, - "paid": 3825, - "1943": 3826, - "prison": 3827, - "save": 3828, - "kitchen": 3829, 
- "label": 3830, - "administrative": 3831, - "intended": 3832, - "constructed": 3833, - "academic": 3834, - "nice": 3835, - "teacher": 3836, - "races": 3837, - "1956": 3838, - "formerly": 3839, - "corporation": 3840, - "ben": 3841, - "nation": 3842, - "issued": 3843, - "shut": 3844, - "1958": 3845, - "drums": 3846, - "housing": 3847, - "victoria": 3848, - "seems": 3849, - "opera": 3850, - "1959": 3851, - "graduated": 3852, - "function": 3853, - "von": 3854, - "mentioned": 3855, - "picked": 3856, - "build": 3857, - "recognized": 3858, - "shortly": 3859, - "protection": 3860, - "picture": 3861, - "notable": 3862, - "exchange": 3863, - "elections": 3864, - "1980s": 3865, - "loved": 3866, - "percent": 3867, - "racing": 3868, - "fish": 3869, - "elizabeth": 3870, - "garden": 3871, - "volume": 3872, - "hockey": 3873, - "1941": 3874, - "beside": 3875, - "settled": 3876, - "##ford": 3877, - "1940": 3878, - "competed": 3879, - "replied": 3880, - "drew": 3881, - "1948": 3882, - "actress": 3883, - "marine": 3884, - "scotland": 3885, - "steel": 3886, - "glanced": 3887, - "farm": 3888, - "steve": 3889, - "1957": 3890, - "risk": 3891, - "tonight": 3892, - "positive": 3893, - "magic": 3894, - "singles": 3895, - "effects": 3896, - "gray": 3897, - "screen": 3898, - "dog": 3899, - "##ja": 3900, - "residents": 3901, - "bus": 3902, - "sides": 3903, - "none": 3904, - "secondary": 3905, - "literature": 3906, - "polish": 3907, - "destroyed": 3908, - "flying": 3909, - "founder": 3910, - "households": 3911, - "1939": 3912, - "lay": 3913, - "reserve": 3914, - "usa": 3915, - "gallery": 3916, - "##ler": 3917, - "1946": 3918, - "industrial": 3919, - "younger": 3920, - "approach": 3921, - "appearances": 3922, - "urban": 3923, - "ones": 3924, - "1950": 3925, - "finish": 3926, - "avenue": 3927, - "powerful": 3928, - "fully": 3929, - "growth": 3930, - "page": 3931, - "honor": 3932, - "jersey": 3933, - "projects": 3934, - "advanced": 3935, - "revealed": 3936, - "basic": 3937, - "90": 3938, - "infantry": 3939, - "pair": 3940, - "equipment": 3941, - "visit": 3942, - "33": 3943, - "evening": 3944, - "search": 3945, - "grant": 3946, - "effort": 3947, - "solo": 3948, - "treatment": 3949, - "buried": 3950, - "republican": 3951, - "primarily": 3952, - "bottom": 3953, - "owner": 3954, - "1970s": 3955, - "israel": 3956, - "gives": 3957, - "jim": 3958, - "dream": 3959, - "bob": 3960, - "remain": 3961, - "spot": 3962, - "70": 3963, - "notes": 3964, - "produce": 3965, - "champions": 3966, - "contact": 3967, - "ed": 3968, - "soul": 3969, - "accepted": 3970, - "ways": 3971, - "del": 3972, - "##ally": 3973, - "losing": 3974, - "split": 3975, - "price": 3976, - "capacity": 3977, - "basis": 3978, - "trial": 3979, - "questions": 3980, - "##ina": 3981, - "1955": 3982, - "20th": 3983, - "guess": 3984, - "officially": 3985, - "memorial": 3986, - "naval": 3987, - "initial": 3988, - "##ization": 3989, - "whispered": 3990, - "median": 3991, - "engineer": 3992, - "##ful": 3993, - "sydney": 3994, - "##go": 3995, - "columbia": 3996, - "strength": 3997, - "300": 3998, - "1952": 3999, - "tears": 4000, - "senate": 4001, - "00": 4002, - "card": 4003, - "asian": 4004, - "agent": 4005, - "1947": 4006, - "software": 4007, - "44": 4008, - "draw": 4009, - "warm": 4010, - "supposed": 4011, - "com": 4012, - "pro": 4013, - "##il": 4014, - "transferred": 4015, - "leaned": 4016, - "##at": 4017, - "candidate": 4018, - "escape": 4019, - "mountains": 4020, - "asia": 4021, - "potential": 4022, - "activity": 4023, - "entertainment": 4024, - "seem": 4025, - "traffic": 
4026, - "jackson": 4027, - "murder": 4028, - "36": 4029, - "slow": 4030, - "product": 4031, - "orchestra": 4032, - "haven": 4033, - "agency": 4034, - "bbc": 4035, - "taught": 4036, - "website": 4037, - "comedy": 4038, - "unable": 4039, - "storm": 4040, - "planning": 4041, - "albums": 4042, - "rugby": 4043, - "environment": 4044, - "scientific": 4045, - "grabbed": 4046, - "protect": 4047, - "##hi": 4048, - "boat": 4049, - "typically": 4050, - "1954": 4051, - "1953": 4052, - "damage": 4053, - "principal": 4054, - "divided": 4055, - "dedicated": 4056, - "mount": 4057, - "ohio": 4058, - "##berg": 4059, - "pick": 4060, - "fought": 4061, - "driver": 4062, - "##der": 4063, - "empty": 4064, - "shoulders": 4065, - "sort": 4066, - "thank": 4067, - "berlin": 4068, - "prominent": 4069, - "account": 4070, - "freedom": 4071, - "necessary": 4072, - "efforts": 4073, - "alex": 4074, - "headquarters": 4075, - "follows": 4076, - "alongside": 4077, - "des": 4078, - "simon": 4079, - "andrew": 4080, - "suggested": 4081, - "operating": 4082, - "learning": 4083, - "steps": 4084, - "1949": 4085, - "sweet": 4086, - "technical": 4087, - "begin": 4088, - "easily": 4089, - "34": 4090, - "teeth": 4091, - "speaking": 4092, - "settlement": 4093, - "scale": 4094, - "##sh": 4095, - "renamed": 4096, - "ray": 4097, - "max": 4098, - "enemy": 4099, - "semi": 4100, - "joint": 4101, - "compared": 4102, - "##rd": 4103, - "scottish": 4104, - "leadership": 4105, - "analysis": 4106, - "offers": 4107, - "georgia": 4108, - "pieces": 4109, - "captured": 4110, - "animal": 4111, - "deputy": 4112, - "guest": 4113, - "organized": 4114, - "##lin": 4115, - "tony": 4116, - "combined": 4117, - "method": 4118, - "challenge": 4119, - "1960s": 4120, - "huge": 4121, - "wants": 4122, - "battalion": 4123, - "sons": 4124, - "rise": 4125, - "crime": 4126, - "types": 4127, - "facilities": 4128, - "telling": 4129, - "path": 4130, - "1951": 4131, - "platform": 4132, - "sit": 4133, - "1990s": 4134, - "##lo": 4135, - "tells": 4136, - "assigned": 4137, - "rich": 4138, - "pull": 4139, - "##ot": 4140, - "commonly": 4141, - "alive": 4142, - "##za": 4143, - "letters": 4144, - "concept": 4145, - "conducted": 4146, - "wearing": 4147, - "happen": 4148, - "bought": 4149, - "becomes": 4150, - "holy": 4151, - "gets": 4152, - "ocean": 4153, - "defeat": 4154, - "languages": 4155, - "purchased": 4156, - "coffee": 4157, - "occurred": 4158, - "titled": 4159, - "##q": 4160, - "declared": 4161, - "applied": 4162, - "sciences": 4163, - "concert": 4164, - "sounds": 4165, - "jazz": 4166, - "brain": 4167, - "##me": 4168, - "painting": 4169, - "fleet": 4170, - "tax": 4171, - "nick": 4172, - "##ius": 4173, - "michigan": 4174, - "count": 4175, - "animals": 4176, - "leaders": 4177, - "episodes": 4178, - "##line": 4179, - "content": 4180, - "##den": 4181, - "birth": 4182, - "##it": 4183, - "clubs": 4184, - "64": 4185, - "palace": 4186, - "critical": 4187, - "refused": 4188, - "fair": 4189, - "leg": 4190, - "laughed": 4191, - "returning": 4192, - "surrounding": 4193, - "participated": 4194, - "formation": 4195, - "lifted": 4196, - "pointed": 4197, - "connected": 4198, - "rome": 4199, - "medicine": 4200, - "laid": 4201, - "taylor": 4202, - "santa": 4203, - "powers": 4204, - "adam": 4205, - "tall": 4206, - "shared": 4207, - "focused": 4208, - "knowing": 4209, - "yards": 4210, - "entrance": 4211, - "falls": 4212, - "##wa": 4213, - "calling": 4214, - "##ad": 4215, - "sources": 4216, - "chosen": 4217, - "beneath": 4218, - "resources": 4219, - "yard": 4220, - "##ite": 4221, - "nominated": 
4222, - "silence": 4223, - "zone": 4224, - "defined": 4225, - "##que": 4226, - "gained": 4227, - "thirty": 4228, - "38": 4229, - "bodies": 4230, - "moon": 4231, - "##ard": 4232, - "adopted": 4233, - "christmas": 4234, - "widely": 4235, - "register": 4236, - "apart": 4237, - "iran": 4238, - "premier": 4239, - "serves": 4240, - "du": 4241, - "unknown": 4242, - "parties": 4243, - "##les": 4244, - "generation": 4245, - "##ff": 4246, - "continues": 4247, - "quick": 4248, - "fields": 4249, - "brigade": 4250, - "quiet": 4251, - "teaching": 4252, - "clothes": 4253, - "impact": 4254, - "weapons": 4255, - "partner": 4256, - "flat": 4257, - "theater": 4258, - "supreme": 4259, - "1938": 4260, - "37": 4261, - "relations": 4262, - "##tor": 4263, - "plants": 4264, - "suffered": 4265, - "1936": 4266, - "wilson": 4267, - "kids": 4268, - "begins": 4269, - "##age": 4270, - "1918": 4271, - "seats": 4272, - "armed": 4273, - "internet": 4274, - "models": 4275, - "worth": 4276, - "laws": 4277, - "400": 4278, - "communities": 4279, - "classes": 4280, - "background": 4281, - "knows": 4282, - "thanks": 4283, - "quarter": 4284, - "reaching": 4285, - "humans": 4286, - "carry": 4287, - "killing": 4288, - "format": 4289, - "kong": 4290, - "hong": 4291, - "setting": 4292, - "75": 4293, - "architecture": 4294, - "disease": 4295, - "railroad": 4296, - "inc": 4297, - "possibly": 4298, - "wish": 4299, - "arthur": 4300, - "thoughts": 4301, - "harry": 4302, - "doors": 4303, - "density": 4304, - "##di": 4305, - "crowd": 4306, - "illinois": 4307, - "stomach": 4308, - "tone": 4309, - "unique": 4310, - "reports": 4311, - "anyway": 4312, - "##ir": 4313, - "liberal": 4314, - "der": 4315, - "vehicle": 4316, - "thick": 4317, - "dry": 4318, - "drug": 4319, - "faced": 4320, - "largely": 4321, - "facility": 4322, - "theme": 4323, - "holds": 4324, - "creation": 4325, - "strange": 4326, - "colonel": 4327, - "##mi": 4328, - "revolution": 4329, - "bell": 4330, - "politics": 4331, - "turns": 4332, - "silent": 4333, - "rail": 4334, - "relief": 4335, - "independence": 4336, - "combat": 4337, - "shape": 4338, - "write": 4339, - "determined": 4340, - "sales": 4341, - "learned": 4342, - "4th": 4343, - "finger": 4344, - "oxford": 4345, - "providing": 4346, - "1937": 4347, - "heritage": 4348, - "fiction": 4349, - "situated": 4350, - "designated": 4351, - "allowing": 4352, - "distribution": 4353, - "hosted": 4354, - "##est": 4355, - "sight": 4356, - "interview": 4357, - "estimated": 4358, - "reduced": 4359, - "##ria": 4360, - "toronto": 4361, - "footballer": 4362, - "keeping": 4363, - "guys": 4364, - "damn": 4365, - "claim": 4366, - "motion": 4367, - "sport": 4368, - "sixth": 4369, - "stayed": 4370, - "##ze": 4371, - "en": 4372, - "rear": 4373, - "receive": 4374, - "handed": 4375, - "twelve": 4376, - "dress": 4377, - "audience": 4378, - "granted": 4379, - "brazil": 4380, - "##well": 4381, - "spirit": 4382, - "##ated": 4383, - "noticed": 4384, - "etc": 4385, - "olympic": 4386, - "representative": 4387, - "eric": 4388, - "tight": 4389, - "trouble": 4390, - "reviews": 4391, - "drink": 4392, - "vampire": 4393, - "missing": 4394, - "roles": 4395, - "ranked": 4396, - "newly": 4397, - "household": 4398, - "finals": 4399, - "wave": 4400, - "critics": 4401, - "##ee": 4402, - "phase": 4403, - "massachusetts": 4404, - "pilot": 4405, - "unlike": 4406, - "philadelphia": 4407, - "bright": 4408, - "guns": 4409, - "crown": 4410, - "organizations": 4411, - "roof": 4412, - "42": 4413, - "respectively": 4414, - "clearly": 4415, - "tongue": 4416, - "marked": 4417, - 
"circle": 4418, - "fox": 4419, - "korea": 4420, - "bronze": 4421, - "brian": 4422, - "expanded": 4423, - "sexual": 4424, - "supply": 4425, - "yourself": 4426, - "inspired": 4427, - "labour": 4428, - "fc": 4429, - "##ah": 4430, - "reference": 4431, - "vision": 4432, - "draft": 4433, - "connection": 4434, - "brand": 4435, - "reasons": 4436, - "1935": 4437, - "classic": 4438, - "driving": 4439, - "trip": 4440, - "jesus": 4441, - "cells": 4442, - "entry": 4443, - "1920": 4444, - "neither": 4445, - "trail": 4446, - "claims": 4447, - "atlantic": 4448, - "orders": 4449, - "labor": 4450, - "nose": 4451, - "afraid": 4452, - "identified": 4453, - "intelligence": 4454, - "calls": 4455, - "cancer": 4456, - "attacked": 4457, - "passing": 4458, - "stephen": 4459, - "positions": 4460, - "imperial": 4461, - "grey": 4462, - "jason": 4463, - "39": 4464, - "sunday": 4465, - "48": 4466, - "swedish": 4467, - "avoid": 4468, - "extra": 4469, - "uncle": 4470, - "message": 4471, - "covers": 4472, - "allows": 4473, - "surprise": 4474, - "materials": 4475, - "fame": 4476, - "hunter": 4477, - "##ji": 4478, - "1930": 4479, - "citizens": 4480, - "figures": 4481, - "davis": 4482, - "environmental": 4483, - "confirmed": 4484, - "shit": 4485, - "titles": 4486, - "di": 4487, - "performing": 4488, - "difference": 4489, - "acts": 4490, - "attacks": 4491, - "##ov": 4492, - "existing": 4493, - "votes": 4494, - "opportunity": 4495, - "nor": 4496, - "shop": 4497, - "entirely": 4498, - "trains": 4499, - "opposite": 4500, - "pakistan": 4501, - "##pa": 4502, - "develop": 4503, - "resulted": 4504, - "representatives": 4505, - "actions": 4506, - "reality": 4507, - "pressed": 4508, - "##ish": 4509, - "barely": 4510, - "wine": 4511, - "conversation": 4512, - "faculty": 4513, - "northwest": 4514, - "ends": 4515, - "documentary": 4516, - "nuclear": 4517, - "stock": 4518, - "grace": 4519, - "sets": 4520, - "eat": 4521, - "alternative": 4522, - "##ps": 4523, - "bag": 4524, - "resulting": 4525, - "creating": 4526, - "surprised": 4527, - "cemetery": 4528, - "1919": 4529, - "drop": 4530, - "finding": 4531, - "sarah": 4532, - "cricket": 4533, - "streets": 4534, - "tradition": 4535, - "ride": 4536, - "1933": 4537, - "exhibition": 4538, - "target": 4539, - "ear": 4540, - "explained": 4541, - "rain": 4542, - "composer": 4543, - "injury": 4544, - "apartment": 4545, - "municipal": 4546, - "educational": 4547, - "occupied": 4548, - "netherlands": 4549, - "clean": 4550, - "billion": 4551, - "constitution": 4552, - "learn": 4553, - "1914": 4554, - "maximum": 4555, - "classical": 4556, - "francis": 4557, - "lose": 4558, - "opposition": 4559, - "jose": 4560, - "ontario": 4561, - "bear": 4562, - "core": 4563, - "hills": 4564, - "rolled": 4565, - "ending": 4566, - "drawn": 4567, - "permanent": 4568, - "fun": 4569, - "##tes": 4570, - "##lla": 4571, - "lewis": 4572, - "sites": 4573, - "chamber": 4574, - "ryan": 4575, - "##way": 4576, - "scoring": 4577, - "height": 4578, - "1934": 4579, - "##house": 4580, - "lyrics": 4581, - "staring": 4582, - "55": 4583, - "officials": 4584, - "1917": 4585, - "snow": 4586, - "oldest": 4587, - "##tic": 4588, - "orange": 4589, - "##ger": 4590, - "qualified": 4591, - "interior": 4592, - "apparently": 4593, - "succeeded": 4594, - "thousand": 4595, - "dinner": 4596, - "lights": 4597, - "existence": 4598, - "fans": 4599, - "heavily": 4600, - "41": 4601, - "greatest": 4602, - "conservative": 4603, - "send": 4604, - "bowl": 4605, - "plus": 4606, - "enter": 4607, - "catch": 4608, - "##un": 4609, - "economy": 4610, - "duty": 4611, - 
"1929": 4612, - "speech": 4613, - "authorities": 4614, - "princess": 4615, - "performances": 4616, - "versions": 4617, - "shall": 4618, - "graduate": 4619, - "pictures": 4620, - "effective": 4621, - "remembered": 4622, - "poetry": 4623, - "desk": 4624, - "crossed": 4625, - "starring": 4626, - "starts": 4627, - "passenger": 4628, - "sharp": 4629, - "##ant": 4630, - "acres": 4631, - "ass": 4632, - "weather": 4633, - "falling": 4634, - "rank": 4635, - "fund": 4636, - "supporting": 4637, - "check": 4638, - "adult": 4639, - "publishing": 4640, - "heads": 4641, - "cm": 4642, - "southeast": 4643, - "lane": 4644, - "##burg": 4645, - "application": 4646, - "bc": 4647, - "##ura": 4648, - "les": 4649, - "condition": 4650, - "transfer": 4651, - "prevent": 4652, - "display": 4653, - "ex": 4654, - "regions": 4655, - "earl": 4656, - "federation": 4657, - "cool": 4658, - "relatively": 4659, - "answered": 4660, - "besides": 4661, - "1928": 4662, - "obtained": 4663, - "portion": 4664, - "##town": 4665, - "mix": 4666, - "##ding": 4667, - "reaction": 4668, - "liked": 4669, - "dean": 4670, - "express": 4671, - "peak": 4672, - "1932": 4673, - "##tte": 4674, - "counter": 4675, - "religion": 4676, - "chain": 4677, - "rare": 4678, - "miller": 4679, - "convention": 4680, - "aid": 4681, - "lie": 4682, - "vehicles": 4683, - "mobile": 4684, - "perform": 4685, - "squad": 4686, - "wonder": 4687, - "lying": 4688, - "crazy": 4689, - "sword": 4690, - "##ping": 4691, - "attempted": 4692, - "centuries": 4693, - "weren": 4694, - "philosophy": 4695, - "category": 4696, - "##ize": 4697, - "anna": 4698, - "interested": 4699, - "47": 4700, - "sweden": 4701, - "wolf": 4702, - "frequently": 4703, - "abandoned": 4704, - "kg": 4705, - "literary": 4706, - "alliance": 4707, - "task": 4708, - "entitled": 4709, - "##ay": 4710, - "threw": 4711, - "promotion": 4712, - "factory": 4713, - "tiny": 4714, - "soccer": 4715, - "visited": 4716, - "matt": 4717, - "fm": 4718, - "achieved": 4719, - "52": 4720, - "defence": 4721, - "internal": 4722, - "persian": 4723, - "43": 4724, - "methods": 4725, - "##ging": 4726, - "arrested": 4727, - "otherwise": 4728, - "cambridge": 4729, - "programming": 4730, - "villages": 4731, - "elementary": 4732, - "districts": 4733, - "rooms": 4734, - "criminal": 4735, - "conflict": 4736, - "worry": 4737, - "trained": 4738, - "1931": 4739, - "attempts": 4740, - "waited": 4741, - "signal": 4742, - "bird": 4743, - "truck": 4744, - "subsequent": 4745, - "programme": 4746, - "##ol": 4747, - "ad": 4748, - "49": 4749, - "communist": 4750, - "details": 4751, - "faith": 4752, - "sector": 4753, - "patrick": 4754, - "carrying": 4755, - "laugh": 4756, - "##ss": 4757, - "controlled": 4758, - "korean": 4759, - "showing": 4760, - "origin": 4761, - "fuel": 4762, - "evil": 4763, - "1927": 4764, - "##ent": 4765, - "brief": 4766, - "identity": 4767, - "darkness": 4768, - "address": 4769, - "pool": 4770, - "missed": 4771, - "publication": 4772, - "web": 4773, - "planet": 4774, - "ian": 4775, - "anne": 4776, - "wings": 4777, - "invited": 4778, - "##tt": 4779, - "briefly": 4780, - "standards": 4781, - "kissed": 4782, - "##be": 4783, - "ideas": 4784, - "climate": 4785, - "causing": 4786, - "walter": 4787, - "worse": 4788, - "albert": 4789, - "articles": 4790, - "winners": 4791, - "desire": 4792, - "aged": 4793, - "northeast": 4794, - "dangerous": 4795, - "gate": 4796, - "doubt": 4797, - "1922": 4798, - "wooden": 4799, - "multi": 4800, - "##ky": 4801, - "poet": 4802, - "rising": 4803, - "funding": 4804, - "46": 4805, - "communications": 4806, 
- "communication": 4807, - "violence": 4808, - "copies": 4809, - "prepared": 4810, - "ford": 4811, - "investigation": 4812, - "skills": 4813, - "1924": 4814, - "pulling": 4815, - "electronic": 4816, - "##ak": 4817, - "##ial": 4818, - "##han": 4819, - "containing": 4820, - "ultimately": 4821, - "offices": 4822, - "singing": 4823, - "understanding": 4824, - "restaurant": 4825, - "tomorrow": 4826, - "fashion": 4827, - "christ": 4828, - "ward": 4829, - "da": 4830, - "pope": 4831, - "stands": 4832, - "5th": 4833, - "flow": 4834, - "studios": 4835, - "aired": 4836, - "commissioned": 4837, - "contained": 4838, - "exist": 4839, - "fresh": 4840, - "americans": 4841, - "##per": 4842, - "wrestling": 4843, - "approved": 4844, - "kid": 4845, - "employed": 4846, - "respect": 4847, - "suit": 4848, - "1925": 4849, - "angel": 4850, - "asking": 4851, - "increasing": 4852, - "frame": 4853, - "angry": 4854, - "selling": 4855, - "1950s": 4856, - "thin": 4857, - "finds": 4858, - "##nd": 4859, - "temperature": 4860, - "statement": 4861, - "ali": 4862, - "explain": 4863, - "inhabitants": 4864, - "towns": 4865, - "extensive": 4866, - "narrow": 4867, - "51": 4868, - "jane": 4869, - "flowers": 4870, - "images": 4871, - "promise": 4872, - "somewhere": 4873, - "object": 4874, - "fly": 4875, - "closely": 4876, - "##ls": 4877, - "1912": 4878, - "bureau": 4879, - "cape": 4880, - "1926": 4881, - "weekly": 4882, - "presidential": 4883, - "legislative": 4884, - "1921": 4885, - "##ai": 4886, - "##au": 4887, - "launch": 4888, - "founding": 4889, - "##ny": 4890, - "978": 4891, - "##ring": 4892, - "artillery": 4893, - "strike": 4894, - "un": 4895, - "institutions": 4896, - "roll": 4897, - "writers": 4898, - "landing": 4899, - "chose": 4900, - "kevin": 4901, - "anymore": 4902, - "pp": 4903, - "##ut": 4904, - "attorney": 4905, - "fit": 4906, - "dan": 4907, - "billboard": 4908, - "receiving": 4909, - "agricultural": 4910, - "breaking": 4911, - "sought": 4912, - "dave": 4913, - "admitted": 4914, - "lands": 4915, - "mexican": 4916, - "##bury": 4917, - "charlie": 4918, - "specifically": 4919, - "hole": 4920, - "iv": 4921, - "howard": 4922, - "credit": 4923, - "moscow": 4924, - "roads": 4925, - "accident": 4926, - "1923": 4927, - "proved": 4928, - "wear": 4929, - "struck": 4930, - "hey": 4931, - "guards": 4932, - "stuff": 4933, - "slid": 4934, - "expansion": 4935, - "1915": 4936, - "cat": 4937, - "anthony": 4938, - "##kin": 4939, - "melbourne": 4940, - "opposed": 4941, - "sub": 4942, - "southwest": 4943, - "architect": 4944, - "failure": 4945, - "plane": 4946, - "1916": 4947, - "##ron": 4948, - "map": 4949, - "camera": 4950, - "tank": 4951, - "listen": 4952, - "regarding": 4953, - "wet": 4954, - "introduction": 4955, - "metropolitan": 4956, - "link": 4957, - "ep": 4958, - "fighter": 4959, - "inch": 4960, - "grown": 4961, - "gene": 4962, - "anger": 4963, - "fixed": 4964, - "buy": 4965, - "dvd": 4966, - "khan": 4967, - "domestic": 4968, - "worldwide": 4969, - "chapel": 4970, - "mill": 4971, - "functions": 4972, - "examples": 4973, - "##head": 4974, - "developing": 4975, - "1910": 4976, - "turkey": 4977, - "hits": 4978, - "pocket": 4979, - "antonio": 4980, - "papers": 4981, - "grow": 4982, - "unless": 4983, - "circuit": 4984, - "18th": 4985, - "concerned": 4986, - "attached": 4987, - "journalist": 4988, - "selection": 4989, - "journey": 4990, - "converted": 4991, - "provincial": 4992, - "painted": 4993, - "hearing": 4994, - "aren": 4995, - "bands": 4996, - "negative": 4997, - "aside": 4998, - "wondered": 4999, - "knight": 5000, - "lap": 
5001, - "survey": 5002, - "ma": 5003, - "##ow": 5004, - "noise": 5005, - "billy": 5006, - "##ium": 5007, - "shooting": 5008, - "guide": 5009, - "bedroom": 5010, - "priest": 5011, - "resistance": 5012, - "motor": 5013, - "homes": 5014, - "sounded": 5015, - "giant": 5016, - "##mer": 5017, - "150": 5018, - "scenes": 5019, - "equal": 5020, - "comic": 5021, - "patients": 5022, - "hidden": 5023, - "solid": 5024, - "actual": 5025, - "bringing": 5026, - "afternoon": 5027, - "touched": 5028, - "funds": 5029, - "wedding": 5030, - "consisted": 5031, - "marie": 5032, - "canal": 5033, - "sr": 5034, - "kim": 5035, - "treaty": 5036, - "turkish": 5037, - "recognition": 5038, - "residence": 5039, - "cathedral": 5040, - "broad": 5041, - "knees": 5042, - "incident": 5043, - "shaped": 5044, - "fired": 5045, - "norwegian": 5046, - "handle": 5047, - "cheek": 5048, - "contest": 5049, - "represent": 5050, - "##pe": 5051, - "representing": 5052, - "beauty": 5053, - "##sen": 5054, - "birds": 5055, - "advantage": 5056, - "emergency": 5057, - "wrapped": 5058, - "drawing": 5059, - "notice": 5060, - "pink": 5061, - "broadcasting": 5062, - "##ong": 5063, - "somehow": 5064, - "bachelor": 5065, - "seventh": 5066, - "collected": 5067, - "registered": 5068, - "establishment": 5069, - "alan": 5070, - "assumed": 5071, - "chemical": 5072, - "personnel": 5073, - "roger": 5074, - "retirement": 5075, - "jeff": 5076, - "portuguese": 5077, - "wore": 5078, - "tied": 5079, - "device": 5080, - "threat": 5081, - "progress": 5082, - "advance": 5083, - "##ised": 5084, - "banks": 5085, - "hired": 5086, - "manchester": 5087, - "nfl": 5088, - "teachers": 5089, - "structures": 5090, - "forever": 5091, - "##bo": 5092, - "tennis": 5093, - "helping": 5094, - "saturday": 5095, - "sale": 5096, - "applications": 5097, - "junction": 5098, - "hip": 5099, - "incorporated": 5100, - "neighborhood": 5101, - "dressed": 5102, - "ceremony": 5103, - "##ds": 5104, - "influenced": 5105, - "hers": 5106, - "visual": 5107, - "stairs": 5108, - "decades": 5109, - "inner": 5110, - "kansas": 5111, - "hung": 5112, - "hoped": 5113, - "gain": 5114, - "scheduled": 5115, - "downtown": 5116, - "engaged": 5117, - "austria": 5118, - "clock": 5119, - "norway": 5120, - "certainly": 5121, - "pale": 5122, - "protected": 5123, - "1913": 5124, - "victor": 5125, - "employees": 5126, - "plate": 5127, - "putting": 5128, - "surrounded": 5129, - "##ists": 5130, - "finishing": 5131, - "blues": 5132, - "tropical": 5133, - "##ries": 5134, - "minnesota": 5135, - "consider": 5136, - "philippines": 5137, - "accept": 5138, - "54": 5139, - "retrieved": 5140, - "1900": 5141, - "concern": 5142, - "anderson": 5143, - "properties": 5144, - "institution": 5145, - "gordon": 5146, - "successfully": 5147, - "vietnam": 5148, - "##dy": 5149, - "backing": 5150, - "outstanding": 5151, - "muslim": 5152, - "crossing": 5153, - "folk": 5154, - "producing": 5155, - "usual": 5156, - "demand": 5157, - "occurs": 5158, - "observed": 5159, - "lawyer": 5160, - "educated": 5161, - "##ana": 5162, - "kelly": 5163, - "string": 5164, - "pleasure": 5165, - "budget": 5166, - "items": 5167, - "quietly": 5168, - "colorado": 5169, - "philip": 5170, - "typical": 5171, - "##worth": 5172, - "derived": 5173, - "600": 5174, - "survived": 5175, - "asks": 5176, - "mental": 5177, - "##ide": 5178, - "56": 5179, - "jake": 5180, - "jews": 5181, - "distinguished": 5182, - "ltd": 5183, - "1911": 5184, - "sri": 5185, - "extremely": 5186, - "53": 5187, - "athletic": 5188, - "loud": 5189, - "thousands": 5190, - "worried": 5191, - "shadow": 
5192, - "transportation": 5193, - "horses": 5194, - "weapon": 5195, - "arena": 5196, - "importance": 5197, - "users": 5198, - "tim": 5199, - "objects": 5200, - "contributed": 5201, - "dragon": 5202, - "douglas": 5203, - "aware": 5204, - "senator": 5205, - "johnny": 5206, - "jordan": 5207, - "sisters": 5208, - "engines": 5209, - "flag": 5210, - "investment": 5211, - "samuel": 5212, - "shock": 5213, - "capable": 5214, - "clark": 5215, - "row": 5216, - "wheel": 5217, - "refers": 5218, - "session": 5219, - "familiar": 5220, - "biggest": 5221, - "wins": 5222, - "hate": 5223, - "maintained": 5224, - "drove": 5225, - "hamilton": 5226, - "request": 5227, - "expressed": 5228, - "injured": 5229, - "underground": 5230, - "churches": 5231, - "walker": 5232, - "wars": 5233, - "tunnel": 5234, - "passes": 5235, - "stupid": 5236, - "agriculture": 5237, - "softly": 5238, - "cabinet": 5239, - "regarded": 5240, - "joining": 5241, - "indiana": 5242, - "##ea": 5243, - "##ms": 5244, - "push": 5245, - "dates": 5246, - "spend": 5247, - "behavior": 5248, - "woods": 5249, - "protein": 5250, - "gently": 5251, - "chase": 5252, - "morgan": 5253, - "mention": 5254, - "burning": 5255, - "wake": 5256, - "combination": 5257, - "occur": 5258, - "mirror": 5259, - "leads": 5260, - "jimmy": 5261, - "indeed": 5262, - "impossible": 5263, - "singapore": 5264, - "paintings": 5265, - "covering": 5266, - "##nes": 5267, - "soldier": 5268, - "locations": 5269, - "attendance": 5270, - "sell": 5271, - "historian": 5272, - "wisconsin": 5273, - "invasion": 5274, - "argued": 5275, - "painter": 5276, - "diego": 5277, - "changing": 5278, - "egypt": 5279, - "##don": 5280, - "experienced": 5281, - "inches": 5282, - "##ku": 5283, - "missouri": 5284, - "vol": 5285, - "grounds": 5286, - "spoken": 5287, - "switzerland": 5288, - "##gan": 5289, - "reform": 5290, - "rolling": 5291, - "ha": 5292, - "forget": 5293, - "massive": 5294, - "resigned": 5295, - "burned": 5296, - "allen": 5297, - "tennessee": 5298, - "locked": 5299, - "values": 5300, - "improved": 5301, - "##mo": 5302, - "wounded": 5303, - "universe": 5304, - "sick": 5305, - "dating": 5306, - "facing": 5307, - "pack": 5308, - "purchase": 5309, - "user": 5310, - "##pur": 5311, - "moments": 5312, - "##ul": 5313, - "merged": 5314, - "anniversary": 5315, - "1908": 5316, - "coal": 5317, - "brick": 5318, - "understood": 5319, - "causes": 5320, - "dynasty": 5321, - "queensland": 5322, - "establish": 5323, - "stores": 5324, - "crisis": 5325, - "promote": 5326, - "hoping": 5327, - "views": 5328, - "cards": 5329, - "referee": 5330, - "extension": 5331, - "##si": 5332, - "raise": 5333, - "arizona": 5334, - "improve": 5335, - "colonial": 5336, - "formal": 5337, - "charged": 5338, - "##rt": 5339, - "palm": 5340, - "lucky": 5341, - "hide": 5342, - "rescue": 5343, - "faces": 5344, - "95": 5345, - "feelings": 5346, - "candidates": 5347, - "juan": 5348, - "##ell": 5349, - "goods": 5350, - "6th": 5351, - "courses": 5352, - "weekend": 5353, - "59": 5354, - "luke": 5355, - "cash": 5356, - "fallen": 5357, - "##om": 5358, - "delivered": 5359, - "affected": 5360, - "installed": 5361, - "carefully": 5362, - "tries": 5363, - "swiss": 5364, - "hollywood": 5365, - "costs": 5366, - "lincoln": 5367, - "responsibility": 5368, - "##he": 5369, - "shore": 5370, - "file": 5371, - "proper": 5372, - "normally": 5373, - "maryland": 5374, - "assistance": 5375, - "jump": 5376, - "constant": 5377, - "offering": 5378, - "friendly": 5379, - "waters": 5380, - "persons": 5381, - "realize": 5382, - "contain": 5383, - "trophy": 5384, - 
"800": 5385, - "partnership": 5386, - "factor": 5387, - "58": 5388, - "musicians": 5389, - "cry": 5390, - "bound": 5391, - "oregon": 5392, - "indicated": 5393, - "hero": 5394, - "houston": 5395, - "medium": 5396, - "##ure": 5397, - "consisting": 5398, - "somewhat": 5399, - "##ara": 5400, - "57": 5401, - "cycle": 5402, - "##che": 5403, - "beer": 5404, - "moore": 5405, - "frederick": 5406, - "gotten": 5407, - "eleven": 5408, - "worst": 5409, - "weak": 5410, - "approached": 5411, - "arranged": 5412, - "chin": 5413, - "loan": 5414, - "universal": 5415, - "bond": 5416, - "fifteen": 5417, - "pattern": 5418, - "disappeared": 5419, - "##ney": 5420, - "translated": 5421, - "##zed": 5422, - "lip": 5423, - "arab": 5424, - "capture": 5425, - "interests": 5426, - "insurance": 5427, - "##chi": 5428, - "shifted": 5429, - "cave": 5430, - "prix": 5431, - "warning": 5432, - "sections": 5433, - "courts": 5434, - "coat": 5435, - "plot": 5436, - "smell": 5437, - "feed": 5438, - "golf": 5439, - "favorite": 5440, - "maintain": 5441, - "knife": 5442, - "vs": 5443, - "voted": 5444, - "degrees": 5445, - "finance": 5446, - "quebec": 5447, - "opinion": 5448, - "translation": 5449, - "manner": 5450, - "ruled": 5451, - "operate": 5452, - "productions": 5453, - "choose": 5454, - "musician": 5455, - "discovery": 5456, - "confused": 5457, - "tired": 5458, - "separated": 5459, - "stream": 5460, - "techniques": 5461, - "committed": 5462, - "attend": 5463, - "ranking": 5464, - "kings": 5465, - "throw": 5466, - "passengers": 5467, - "measure": 5468, - "horror": 5469, - "fan": 5470, - "mining": 5471, - "sand": 5472, - "danger": 5473, - "salt": 5474, - "calm": 5475, - "decade": 5476, - "dam": 5477, - "require": 5478, - "runner": 5479, - "##ik": 5480, - "rush": 5481, - "associate": 5482, - "greece": 5483, - "##ker": 5484, - "rivers": 5485, - "consecutive": 5486, - "matthew": 5487, - "##ski": 5488, - "sighed": 5489, - "sq": 5490, - "documents": 5491, - "steam": 5492, - "edited": 5493, - "closing": 5494, - "tie": 5495, - "accused": 5496, - "1905": 5497, - "##ini": 5498, - "islamic": 5499, - "distributed": 5500, - "directors": 5501, - "organisation": 5502, - "bruce": 5503, - "7th": 5504, - "breathing": 5505, - "mad": 5506, - "lit": 5507, - "arrival": 5508, - "concrete": 5509, - "taste": 5510, - "08": 5511, - "composition": 5512, - "shaking": 5513, - "faster": 5514, - "amateur": 5515, - "adjacent": 5516, - "stating": 5517, - "1906": 5518, - "twin": 5519, - "flew": 5520, - "##ran": 5521, - "tokyo": 5522, - "publications": 5523, - "##tone": 5524, - "obviously": 5525, - "ridge": 5526, - "storage": 5527, - "1907": 5528, - "carl": 5529, - "pages": 5530, - "concluded": 5531, - "desert": 5532, - "driven": 5533, - "universities": 5534, - "ages": 5535, - "terminal": 5536, - "sequence": 5537, - "borough": 5538, - "250": 5539, - "constituency": 5540, - "creative": 5541, - "cousin": 5542, - "economics": 5543, - "dreams": 5544, - "margaret": 5545, - "notably": 5546, - "reduce": 5547, - "montreal": 5548, - "mode": 5549, - "17th": 5550, - "ears": 5551, - "saved": 5552, - "jan": 5553, - "vocal": 5554, - "##ica": 5555, - "1909": 5556, - "andy": 5557, - "##jo": 5558, - "riding": 5559, - "roughly": 5560, - "threatened": 5561, - "##ise": 5562, - "meters": 5563, - "meanwhile": 5564, - "landed": 5565, - "compete": 5566, - "repeated": 5567, - "grass": 5568, - "czech": 5569, - "regularly": 5570, - "charges": 5571, - "tea": 5572, - "sudden": 5573, - "appeal": 5574, - "##ung": 5575, - "solution": 5576, - "describes": 5577, - "pierre": 5578, - 
"classification": 5579, - "glad": 5580, - "parking": 5581, - "##ning": 5582, - "belt": 5583, - "physics": 5584, - "99": 5585, - "rachel": 5586, - "add": 5587, - "hungarian": 5588, - "participate": 5589, - "expedition": 5590, - "damaged": 5591, - "gift": 5592, - "childhood": 5593, - "85": 5594, - "fifty": 5595, - "##red": 5596, - "mathematics": 5597, - "jumped": 5598, - "letting": 5599, - "defensive": 5600, - "mph": 5601, - "##ux": 5602, - "##gh": 5603, - "testing": 5604, - "##hip": 5605, - "hundreds": 5606, - "shoot": 5607, - "owners": 5608, - "matters": 5609, - "smoke": 5610, - "israeli": 5611, - "kentucky": 5612, - "dancing": 5613, - "mounted": 5614, - "grandfather": 5615, - "emma": 5616, - "designs": 5617, - "profit": 5618, - "argentina": 5619, - "##gs": 5620, - "truly": 5621, - "li": 5622, - "lawrence": 5623, - "cole": 5624, - "begun": 5625, - "detroit": 5626, - "willing": 5627, - "branches": 5628, - "smiling": 5629, - "decide": 5630, - "miami": 5631, - "enjoyed": 5632, - "recordings": 5633, - "##dale": 5634, - "poverty": 5635, - "ethnic": 5636, - "gay": 5637, - "##bi": 5638, - "gary": 5639, - "arabic": 5640, - "09": 5641, - "accompanied": 5642, - "##one": 5643, - "##ons": 5644, - "fishing": 5645, - "determine": 5646, - "residential": 5647, - "acid": 5648, - "##ary": 5649, - "alice": 5650, - "returns": 5651, - "starred": 5652, - "mail": 5653, - "##ang": 5654, - "jonathan": 5655, - "strategy": 5656, - "##ue": 5657, - "net": 5658, - "forty": 5659, - "cook": 5660, - "businesses": 5661, - "equivalent": 5662, - "commonwealth": 5663, - "distinct": 5664, - "ill": 5665, - "##cy": 5666, - "seriously": 5667, - "##ors": 5668, - "##ped": 5669, - "shift": 5670, - "harris": 5671, - "replace": 5672, - "rio": 5673, - "imagine": 5674, - "formula": 5675, - "ensure": 5676, - "##ber": 5677, - "additionally": 5678, - "scheme": 5679, - "conservation": 5680, - "occasionally": 5681, - "purposes": 5682, - "feels": 5683, - "favor": 5684, - "##and": 5685, - "##ore": 5686, - "1930s": 5687, - "contrast": 5688, - "hanging": 5689, - "hunt": 5690, - "movies": 5691, - "1904": 5692, - "instruments": 5693, - "victims": 5694, - "danish": 5695, - "christopher": 5696, - "busy": 5697, - "demon": 5698, - "sugar": 5699, - "earliest": 5700, - "colony": 5701, - "studying": 5702, - "balance": 5703, - "duties": 5704, - "##ks": 5705, - "belgium": 5706, - "slipped": 5707, - "carter": 5708, - "05": 5709, - "visible": 5710, - "stages": 5711, - "iraq": 5712, - "fifa": 5713, - "##im": 5714, - "commune": 5715, - "forming": 5716, - "zero": 5717, - "07": 5718, - "continuing": 5719, - "talked": 5720, - "counties": 5721, - "legend": 5722, - "bathroom": 5723, - "option": 5724, - "tail": 5725, - "clay": 5726, - "daughters": 5727, - "afterwards": 5728, - "severe": 5729, - "jaw": 5730, - "visitors": 5731, - "##ded": 5732, - "devices": 5733, - "aviation": 5734, - "russell": 5735, - "kate": 5736, - "##vi": 5737, - "entering": 5738, - "subjects": 5739, - "##ino": 5740, - "temporary": 5741, - "swimming": 5742, - "forth": 5743, - "smooth": 5744, - "ghost": 5745, - "audio": 5746, - "bush": 5747, - "operates": 5748, - "rocks": 5749, - "movements": 5750, - "signs": 5751, - "eddie": 5752, - "##tz": 5753, - "ann": 5754, - "voices": 5755, - "honorary": 5756, - "06": 5757, - "memories": 5758, - "dallas": 5759, - "pure": 5760, - "measures": 5761, - "racial": 5762, - "promised": 5763, - "66": 5764, - "harvard": 5765, - "ceo": 5766, - "16th": 5767, - "parliamentary": 5768, - "indicate": 5769, - "benefit": 5770, - "flesh": 5771, - "dublin": 5772, - 
"louisiana": 5773, - "1902": 5774, - "1901": 5775, - "patient": 5776, - "sleeping": 5777, - "1903": 5778, - "membership": 5779, - "coastal": 5780, - "medieval": 5781, - "wanting": 5782, - "element": 5783, - "scholars": 5784, - "rice": 5785, - "62": 5786, - "limit": 5787, - "survive": 5788, - "makeup": 5789, - "rating": 5790, - "definitely": 5791, - "collaboration": 5792, - "obvious": 5793, - "##tan": 5794, - "boss": 5795, - "ms": 5796, - "baron": 5797, - "birthday": 5798, - "linked": 5799, - "soil": 5800, - "diocese": 5801, - "##lan": 5802, - "ncaa": 5803, - "##mann": 5804, - "offensive": 5805, - "shell": 5806, - "shouldn": 5807, - "waist": 5808, - "##tus": 5809, - "plain": 5810, - "ross": 5811, - "organ": 5812, - "resolution": 5813, - "manufacturing": 5814, - "adding": 5815, - "relative": 5816, - "kennedy": 5817, - "98": 5818, - "whilst": 5819, - "moth": 5820, - "marketing": 5821, - "gardens": 5822, - "crash": 5823, - "72": 5824, - "heading": 5825, - "partners": 5826, - "credited": 5827, - "carlos": 5828, - "moves": 5829, - "cable": 5830, - "##zi": 5831, - "marshall": 5832, - "##out": 5833, - "depending": 5834, - "bottle": 5835, - "represents": 5836, - "rejected": 5837, - "responded": 5838, - "existed": 5839, - "04": 5840, - "jobs": 5841, - "denmark": 5842, - "lock": 5843, - "##ating": 5844, - "treated": 5845, - "graham": 5846, - "routes": 5847, - "talent": 5848, - "commissioner": 5849, - "drugs": 5850, - "secure": 5851, - "tests": 5852, - "reign": 5853, - "restored": 5854, - "photography": 5855, - "##gi": 5856, - "contributions": 5857, - "oklahoma": 5858, - "designer": 5859, - "disc": 5860, - "grin": 5861, - "seattle": 5862, - "robin": 5863, - "paused": 5864, - "atlanta": 5865, - "unusual": 5866, - "##gate": 5867, - "praised": 5868, - "las": 5869, - "laughing": 5870, - "satellite": 5871, - "hungary": 5872, - "visiting": 5873, - "##sky": 5874, - "interesting": 5875, - "factors": 5876, - "deck": 5877, - "poems": 5878, - "norman": 5879, - "##water": 5880, - "stuck": 5881, - "speaker": 5882, - "rifle": 5883, - "domain": 5884, - "premiered": 5885, - "##her": 5886, - "dc": 5887, - "comics": 5888, - "actors": 5889, - "01": 5890, - "reputation": 5891, - "eliminated": 5892, - "8th": 5893, - "ceiling": 5894, - "prisoners": 5895, - "script": 5896, - "##nce": 5897, - "leather": 5898, - "austin": 5899, - "mississippi": 5900, - "rapidly": 5901, - "admiral": 5902, - "parallel": 5903, - "charlotte": 5904, - "guilty": 5905, - "tools": 5906, - "gender": 5907, - "divisions": 5908, - "fruit": 5909, - "##bs": 5910, - "laboratory": 5911, - "nelson": 5912, - "fantasy": 5913, - "marry": 5914, - "rapid": 5915, - "aunt": 5916, - "tribe": 5917, - "requirements": 5918, - "aspects": 5919, - "suicide": 5920, - "amongst": 5921, - "adams": 5922, - "bone": 5923, - "ukraine": 5924, - "abc": 5925, - "kick": 5926, - "sees": 5927, - "edinburgh": 5928, - "clothing": 5929, - "column": 5930, - "rough": 5931, - "gods": 5932, - "hunting": 5933, - "broadway": 5934, - "gathered": 5935, - "concerns": 5936, - "##ek": 5937, - "spending": 5938, - "ty": 5939, - "12th": 5940, - "snapped": 5941, - "requires": 5942, - "solar": 5943, - "bones": 5944, - "cavalry": 5945, - "##tta": 5946, - "iowa": 5947, - "drinking": 5948, - "waste": 5949, - "index": 5950, - "franklin": 5951, - "charity": 5952, - "thompson": 5953, - "stewart": 5954, - "tip": 5955, - "flash": 5956, - "landscape": 5957, - "friday": 5958, - "enjoy": 5959, - "singh": 5960, - "poem": 5961, - "listening": 5962, - "##back": 5963, - "eighth": 5964, - "fred": 5965, - "differences": 
5966, - "adapted": 5967, - "bomb": 5968, - "ukrainian": 5969, - "surgery": 5970, - "corporate": 5971, - "masters": 5972, - "anywhere": 5973, - "##more": 5974, - "waves": 5975, - "odd": 5976, - "sean": 5977, - "portugal": 5978, - "orleans": 5979, - "dick": 5980, - "debate": 5981, - "kent": 5982, - "eating": 5983, - "puerto": 5984, - "cleared": 5985, - "96": 5986, - "expect": 5987, - "cinema": 5988, - "97": 5989, - "guitarist": 5990, - "blocks": 5991, - "electrical": 5992, - "agree": 5993, - "involving": 5994, - "depth": 5995, - "dying": 5996, - "panel": 5997, - "struggle": 5998, - "##ged": 5999, - "peninsula": 6000, - "adults": 6001, - "novels": 6002, - "emerged": 6003, - "vienna": 6004, - "metro": 6005, - "debuted": 6006, - "shoes": 6007, - "tamil": 6008, - "songwriter": 6009, - "meets": 6010, - "prove": 6011, - "beating": 6012, - "instance": 6013, - "heaven": 6014, - "scared": 6015, - "sending": 6016, - "marks": 6017, - "artistic": 6018, - "passage": 6019, - "superior": 6020, - "03": 6021, - "significantly": 6022, - "shopping": 6023, - "##tive": 6024, - "retained": 6025, - "##izing": 6026, - "malaysia": 6027, - "technique": 6028, - "cheeks": 6029, - "##ola": 6030, - "warren": 6031, - "maintenance": 6032, - "destroy": 6033, - "extreme": 6034, - "allied": 6035, - "120": 6036, - "appearing": 6037, - "##yn": 6038, - "fill": 6039, - "advice": 6040, - "alabama": 6041, - "qualifying": 6042, - "policies": 6043, - "cleveland": 6044, - "hat": 6045, - "battery": 6046, - "smart": 6047, - "authors": 6048, - "10th": 6049, - "soundtrack": 6050, - "acted": 6051, - "dated": 6052, - "lb": 6053, - "glance": 6054, - "equipped": 6055, - "coalition": 6056, - "funny": 6057, - "outer": 6058, - "ambassador": 6059, - "roy": 6060, - "possibility": 6061, - "couples": 6062, - "campbell": 6063, - "dna": 6064, - "loose": 6065, - "ethan": 6066, - "supplies": 6067, - "1898": 6068, - "gonna": 6069, - "88": 6070, - "monster": 6071, - "##res": 6072, - "shake": 6073, - "agents": 6074, - "frequency": 6075, - "springs": 6076, - "dogs": 6077, - "practices": 6078, - "61": 6079, - "gang": 6080, - "plastic": 6081, - "easier": 6082, - "suggests": 6083, - "gulf": 6084, - "blade": 6085, - "exposed": 6086, - "colors": 6087, - "industries": 6088, - "markets": 6089, - "pan": 6090, - "nervous": 6091, - "electoral": 6092, - "charts": 6093, - "legislation": 6094, - "ownership": 6095, - "##idae": 6096, - "mac": 6097, - "appointment": 6098, - "shield": 6099, - "copy": 6100, - "assault": 6101, - "socialist": 6102, - "abbey": 6103, - "monument": 6104, - "license": 6105, - "throne": 6106, - "employment": 6107, - "jay": 6108, - "93": 6109, - "replacement": 6110, - "charter": 6111, - "cloud": 6112, - "powered": 6113, - "suffering": 6114, - "accounts": 6115, - "oak": 6116, - "connecticut": 6117, - "strongly": 6118, - "wright": 6119, - "colour": 6120, - "crystal": 6121, - "13th": 6122, - "context": 6123, - "welsh": 6124, - "networks": 6125, - "voiced": 6126, - "gabriel": 6127, - "jerry": 6128, - "##cing": 6129, - "forehead": 6130, - "mp": 6131, - "##ens": 6132, - "manage": 6133, - "schedule": 6134, - "totally": 6135, - "remix": 6136, - "##ii": 6137, - "forests": 6138, - "occupation": 6139, - "print": 6140, - "nicholas": 6141, - "brazilian": 6142, - "strategic": 6143, - "vampires": 6144, - "engineers": 6145, - "76": 6146, - "roots": 6147, - "seek": 6148, - "correct": 6149, - "instrumental": 6150, - "und": 6151, - "alfred": 6152, - "backed": 6153, - "hop": 6154, - "##des": 6155, - "stanley": 6156, - "robinson": 6157, - "traveled": 6158, - "wayne": 
6159, - "welcome": 6160, - "austrian": 6161, - "achieve": 6162, - "67": 6163, - "exit": 6164, - "rates": 6165, - "1899": 6166, - "strip": 6167, - "whereas": 6168, - "##cs": 6169, - "sing": 6170, - "deeply": 6171, - "adventure": 6172, - "bobby": 6173, - "rick": 6174, - "jamie": 6175, - "careful": 6176, - "components": 6177, - "cap": 6178, - "useful": 6179, - "personality": 6180, - "knee": 6181, - "##shi": 6182, - "pushing": 6183, - "hosts": 6184, - "02": 6185, - "protest": 6186, - "ca": 6187, - "ottoman": 6188, - "symphony": 6189, - "##sis": 6190, - "63": 6191, - "boundary": 6192, - "1890": 6193, - "processes": 6194, - "considering": 6195, - "considerable": 6196, - "tons": 6197, - "##work": 6198, - "##ft": 6199, - "##nia": 6200, - "cooper": 6201, - "trading": 6202, - "dear": 6203, - "conduct": 6204, - "91": 6205, - "illegal": 6206, - "apple": 6207, - "revolutionary": 6208, - "holiday": 6209, - "definition": 6210, - "harder": 6211, - "##van": 6212, - "jacob": 6213, - "circumstances": 6214, - "destruction": 6215, - "##lle": 6216, - "popularity": 6217, - "grip": 6218, - "classified": 6219, - "liverpool": 6220, - "donald": 6221, - "baltimore": 6222, - "flows": 6223, - "seeking": 6224, - "honour": 6225, - "approval": 6226, - "92": 6227, - "mechanical": 6228, - "till": 6229, - "happening": 6230, - "statue": 6231, - "critic": 6232, - "increasingly": 6233, - "immediate": 6234, - "describe": 6235, - "commerce": 6236, - "stare": 6237, - "##ster": 6238, - "indonesia": 6239, - "meat": 6240, - "rounds": 6241, - "boats": 6242, - "baker": 6243, - "orthodox": 6244, - "depression": 6245, - "formally": 6246, - "worn": 6247, - "naked": 6248, - "claire": 6249, - "muttered": 6250, - "sentence": 6251, - "11th": 6252, - "emily": 6253, - "document": 6254, - "77": 6255, - "criticism": 6256, - "wished": 6257, - "vessel": 6258, - "spiritual": 6259, - "bent": 6260, - "virgin": 6261, - "parker": 6262, - "minimum": 6263, - "murray": 6264, - "lunch": 6265, - "danny": 6266, - "printed": 6267, - "compilation": 6268, - "keyboards": 6269, - "false": 6270, - "blow": 6271, - "belonged": 6272, - "68": 6273, - "raising": 6274, - "78": 6275, - "cutting": 6276, - "##board": 6277, - "pittsburgh": 6278, - "##up": 6279, - "9th": 6280, - "shadows": 6281, - "81": 6282, - "hated": 6283, - "indigenous": 6284, - "jon": 6285, - "15th": 6286, - "barry": 6287, - "scholar": 6288, - "ah": 6289, - "##zer": 6290, - "oliver": 6291, - "##gy": 6292, - "stick": 6293, - "susan": 6294, - "meetings": 6295, - "attracted": 6296, - "spell": 6297, - "romantic": 6298, - "##ver": 6299, - "ye": 6300, - "1895": 6301, - "photo": 6302, - "demanded": 6303, - "customers": 6304, - "##ac": 6305, - "1896": 6306, - "logan": 6307, - "revival": 6308, - "keys": 6309, - "modified": 6310, - "commanded": 6311, - "jeans": 6312, - "##ious": 6313, - "upset": 6314, - "raw": 6315, - "phil": 6316, - "detective": 6317, - "hiding": 6318, - "resident": 6319, - "vincent": 6320, - "##bly": 6321, - "experiences": 6322, - "diamond": 6323, - "defeating": 6324, - "coverage": 6325, - "lucas": 6326, - "external": 6327, - "parks": 6328, - "franchise": 6329, - "helen": 6330, - "bible": 6331, - "successor": 6332, - "percussion": 6333, - "celebrated": 6334, - "il": 6335, - "lift": 6336, - "profile": 6337, - "clan": 6338, - "romania": 6339, - "##ied": 6340, - "mills": 6341, - "##su": 6342, - "nobody": 6343, - "achievement": 6344, - "shrugged": 6345, - "fault": 6346, - "1897": 6347, - "rhythm": 6348, - "initiative": 6349, - "breakfast": 6350, - "carbon": 6351, - "700": 6352, - "69": 6353, - 
"lasted": 6354, - "violent": 6355, - "74": 6356, - "wound": 6357, - "ken": 6358, - "killer": 6359, - "gradually": 6360, - "filmed": 6361, - "°c": 6362, - "dollars": 6363, - "processing": 6364, - "94": 6365, - "remove": 6366, - "criticized": 6367, - "guests": 6368, - "sang": 6369, - "chemistry": 6370, - "##vin": 6371, - "legislature": 6372, - "disney": 6373, - "##bridge": 6374, - "uniform": 6375, - "escaped": 6376, - "integrated": 6377, - "proposal": 6378, - "purple": 6379, - "denied": 6380, - "liquid": 6381, - "karl": 6382, - "influential": 6383, - "morris": 6384, - "nights": 6385, - "stones": 6386, - "intense": 6387, - "experimental": 6388, - "twisted": 6389, - "71": 6390, - "84": 6391, - "##ld": 6392, - "pace": 6393, - "nazi": 6394, - "mitchell": 6395, - "ny": 6396, - "blind": 6397, - "reporter": 6398, - "newspapers": 6399, - "14th": 6400, - "centers": 6401, - "burn": 6402, - "basin": 6403, - "forgotten": 6404, - "surviving": 6405, - "filed": 6406, - "collections": 6407, - "monastery": 6408, - "losses": 6409, - "manual": 6410, - "couch": 6411, - "description": 6412, - "appropriate": 6413, - "merely": 6414, - "tag": 6415, - "missions": 6416, - "sebastian": 6417, - "restoration": 6418, - "replacing": 6419, - "triple": 6420, - "73": 6421, - "elder": 6422, - "julia": 6423, - "warriors": 6424, - "benjamin": 6425, - "julian": 6426, - "convinced": 6427, - "stronger": 6428, - "amazing": 6429, - "declined": 6430, - "versus": 6431, - "merchant": 6432, - "happens": 6433, - "output": 6434, - "finland": 6435, - "bare": 6436, - "barbara": 6437, - "absence": 6438, - "ignored": 6439, - "dawn": 6440, - "injuries": 6441, - "##port": 6442, - "producers": 6443, - "##ram": 6444, - "82": 6445, - "luis": 6446, - "##ities": 6447, - "kw": 6448, - "admit": 6449, - "expensive": 6450, - "electricity": 6451, - "nba": 6452, - "exception": 6453, - "symbol": 6454, - "##ving": 6455, - "ladies": 6456, - "shower": 6457, - "sheriff": 6458, - "characteristics": 6459, - "##je": 6460, - "aimed": 6461, - "button": 6462, - "ratio": 6463, - "effectively": 6464, - "summit": 6465, - "angle": 6466, - "jury": 6467, - "bears": 6468, - "foster": 6469, - "vessels": 6470, - "pants": 6471, - "executed": 6472, - "evans": 6473, - "dozen": 6474, - "advertising": 6475, - "kicked": 6476, - "patrol": 6477, - "1889": 6478, - "competitions": 6479, - "lifetime": 6480, - "principles": 6481, - "athletics": 6482, - "##logy": 6483, - "birmingham": 6484, - "sponsored": 6485, - "89": 6486, - "rob": 6487, - "nomination": 6488, - "1893": 6489, - "acoustic": 6490, - "##sm": 6491, - "creature": 6492, - "longest": 6493, - "##tra": 6494, - "credits": 6495, - "harbor": 6496, - "dust": 6497, - "josh": 6498, - "##so": 6499, - "territories": 6500, - "milk": 6501, - "infrastructure": 6502, - "completion": 6503, - "thailand": 6504, - "indians": 6505, - "leon": 6506, - "archbishop": 6507, - "##sy": 6508, - "assist": 6509, - "pitch": 6510, - "blake": 6511, - "arrangement": 6512, - "girlfriend": 6513, - "serbian": 6514, - "operational": 6515, - "hence": 6516, - "sad": 6517, - "scent": 6518, - "fur": 6519, - "dj": 6520, - "sessions": 6521, - "hp": 6522, - "refer": 6523, - "rarely": 6524, - "##ora": 6525, - "exists": 6526, - "1892": 6527, - "##ten": 6528, - "scientists": 6529, - "dirty": 6530, - "penalty": 6531, - "burst": 6532, - "portrait": 6533, - "seed": 6534, - "79": 6535, - "pole": 6536, - "limits": 6537, - "rival": 6538, - "1894": 6539, - "stable": 6540, - "alpha": 6541, - "grave": 6542, - "constitutional": 6543, - "alcohol": 6544, - "arrest": 6545, - "flower": 
6546, - "mystery": 6547, - "devil": 6548, - "architectural": 6549, - "relationships": 6550, - "greatly": 6551, - "habitat": 6552, - "##istic": 6553, - "larry": 6554, - "progressive": 6555, - "remote": 6556, - "cotton": 6557, - "##ics": 6558, - "##ok": 6559, - "preserved": 6560, - "reaches": 6561, - "##ming": 6562, - "cited": 6563, - "86": 6564, - "vast": 6565, - "scholarship": 6566, - "decisions": 6567, - "cbs": 6568, - "joy": 6569, - "teach": 6570, - "1885": 6571, - "editions": 6572, - "knocked": 6573, - "eve": 6574, - "searching": 6575, - "partly": 6576, - "participation": 6577, - "gap": 6578, - "animated": 6579, - "fate": 6580, - "excellent": 6581, - "##ett": 6582, - "na": 6583, - "87": 6584, - "alternate": 6585, - "saints": 6586, - "youngest": 6587, - "##ily": 6588, - "climbed": 6589, - "##ita": 6590, - "##tors": 6591, - "suggest": 6592, - "##ct": 6593, - "discussion": 6594, - "staying": 6595, - "choir": 6596, - "lakes": 6597, - "jacket": 6598, - "revenue": 6599, - "nevertheless": 6600, - "peaked": 6601, - "instrument": 6602, - "wondering": 6603, - "annually": 6604, - "managing": 6605, - "neil": 6606, - "1891": 6607, - "signing": 6608, - "terry": 6609, - "##ice": 6610, - "apply": 6611, - "clinical": 6612, - "brooklyn": 6613, - "aim": 6614, - "catherine": 6615, - "fuck": 6616, - "farmers": 6617, - "figured": 6618, - "ninth": 6619, - "pride": 6620, - "hugh": 6621, - "evolution": 6622, - "ordinary": 6623, - "involvement": 6624, - "comfortable": 6625, - "shouted": 6626, - "tech": 6627, - "encouraged": 6628, - "taiwan": 6629, - "representation": 6630, - "sharing": 6631, - "##lia": 6632, - "##em": 6633, - "panic": 6634, - "exact": 6635, - "cargo": 6636, - "competing": 6637, - "fat": 6638, - "cried": 6639, - "83": 6640, - "1920s": 6641, - "occasions": 6642, - "pa": 6643, - "cabin": 6644, - "borders": 6645, - "utah": 6646, - "marcus": 6647, - "##isation": 6648, - "badly": 6649, - "muscles": 6650, - "##ance": 6651, - "victorian": 6652, - "transition": 6653, - "warner": 6654, - "bet": 6655, - "permission": 6656, - "##rin": 6657, - "slave": 6658, - "terrible": 6659, - "similarly": 6660, - "shares": 6661, - "seth": 6662, - "uefa": 6663, - "possession": 6664, - "medals": 6665, - "benefits": 6666, - "colleges": 6667, - "lowered": 6668, - "perfectly": 6669, - "mall": 6670, - "transit": 6671, - "##ye": 6672, - "##kar": 6673, - "publisher": 6674, - "##ened": 6675, - "harrison": 6676, - "deaths": 6677, - "elevation": 6678, - "##ae": 6679, - "asleep": 6680, - "machines": 6681, - "sigh": 6682, - "ash": 6683, - "hardly": 6684, - "argument": 6685, - "occasion": 6686, - "parent": 6687, - "leo": 6688, - "decline": 6689, - "1888": 6690, - "contribution": 6691, - "##ua": 6692, - "concentration": 6693, - "1000": 6694, - "opportunities": 6695, - "hispanic": 6696, - "guardian": 6697, - "extent": 6698, - "emotions": 6699, - "hips": 6700, - "mason": 6701, - "volumes": 6702, - "bloody": 6703, - "controversy": 6704, - "diameter": 6705, - "steady": 6706, - "mistake": 6707, - "phoenix": 6708, - "identify": 6709, - "violin": 6710, - "##sk": 6711, - "departure": 6712, - "richmond": 6713, - "spin": 6714, - "funeral": 6715, - "enemies": 6716, - "1864": 6717, - "gear": 6718, - "literally": 6719, - "connor": 6720, - "random": 6721, - "sergeant": 6722, - "grab": 6723, - "confusion": 6724, - "1865": 6725, - "transmission": 6726, - "informed": 6727, - "op": 6728, - "leaning": 6729, - "sacred": 6730, - "suspended": 6731, - "thinks": 6732, - "gates": 6733, - "portland": 6734, - "luck": 6735, - "agencies": 6736, - "yours": 6737, - 
"hull": 6738, - "expert": 6739, - "muscle": 6740, - "layer": 6741, - "practical": 6742, - "sculpture": 6743, - "jerusalem": 6744, - "latest": 6745, - "lloyd": 6746, - "statistics": 6747, - "deeper": 6748, - "recommended": 6749, - "warrior": 6750, - "arkansas": 6751, - "mess": 6752, - "supports": 6753, - "greg": 6754, - "eagle": 6755, - "1880": 6756, - "recovered": 6757, - "rated": 6758, - "concerts": 6759, - "rushed": 6760, - "##ano": 6761, - "stops": 6762, - "eggs": 6763, - "files": 6764, - "premiere": 6765, - "keith": 6766, - "##vo": 6767, - "delhi": 6768, - "turner": 6769, - "pit": 6770, - "affair": 6771, - "belief": 6772, - "paint": 6773, - "##zing": 6774, - "mate": 6775, - "##ach": 6776, - "##ev": 6777, - "victim": 6778, - "##ology": 6779, - "withdrew": 6780, - "bonus": 6781, - "styles": 6782, - "fled": 6783, - "##ud": 6784, - "glasgow": 6785, - "technologies": 6786, - "funded": 6787, - "nbc": 6788, - "adaptation": 6789, - "##ata": 6790, - "portrayed": 6791, - "cooperation": 6792, - "supporters": 6793, - "judges": 6794, - "bernard": 6795, - "justin": 6796, - "hallway": 6797, - "ralph": 6798, - "##ick": 6799, - "graduating": 6800, - "controversial": 6801, - "distant": 6802, - "continental": 6803, - "spider": 6804, - "bite": 6805, - "##ho": 6806, - "recognize": 6807, - "intention": 6808, - "mixing": 6809, - "##ese": 6810, - "egyptian": 6811, - "bow": 6812, - "tourism": 6813, - "suppose": 6814, - "claiming": 6815, - "tiger": 6816, - "dominated": 6817, - "participants": 6818, - "vi": 6819, - "##ru": 6820, - "nurse": 6821, - "partially": 6822, - "tape": 6823, - "##rum": 6824, - "psychology": 6825, - "##rn": 6826, - "essential": 6827, - "touring": 6828, - "duo": 6829, - "voting": 6830, - "civilian": 6831, - "emotional": 6832, - "channels": 6833, - "##king": 6834, - "apparent": 6835, - "hebrew": 6836, - "1887": 6837, - "tommy": 6838, - "carrier": 6839, - "intersection": 6840, - "beast": 6841, - "hudson": 6842, - "##gar": 6843, - "##zo": 6844, - "lab": 6845, - "nova": 6846, - "bench": 6847, - "discuss": 6848, - "costa": 6849, - "##ered": 6850, - "detailed": 6851, - "behalf": 6852, - "drivers": 6853, - "unfortunately": 6854, - "obtain": 6855, - "##lis": 6856, - "rocky": 6857, - "##dae": 6858, - "siege": 6859, - "friendship": 6860, - "honey": 6861, - "##rian": 6862, - "1861": 6863, - "amy": 6864, - "hang": 6865, - "posted": 6866, - "governments": 6867, - "collins": 6868, - "respond": 6869, - "wildlife": 6870, - "preferred": 6871, - "operator": 6872, - "##po": 6873, - "laura": 6874, - "pregnant": 6875, - "videos": 6876, - "dennis": 6877, - "suspected": 6878, - "boots": 6879, - "instantly": 6880, - "weird": 6881, - "automatic": 6882, - "businessman": 6883, - "alleged": 6884, - "placing": 6885, - "throwing": 6886, - "ph": 6887, - "mood": 6888, - "1862": 6889, - "perry": 6890, - "venue": 6891, - "jet": 6892, - "remainder": 6893, - "##lli": 6894, - "##ci": 6895, - "passion": 6896, - "biological": 6897, - "boyfriend": 6898, - "1863": 6899, - "dirt": 6900, - "buffalo": 6901, - "ron": 6902, - "segment": 6903, - "fa": 6904, - "abuse": 6905, - "##era": 6906, - "genre": 6907, - "thrown": 6908, - "stroke": 6909, - "colored": 6910, - "stress": 6911, - "exercise": 6912, - "displayed": 6913, - "##gen": 6914, - "struggled": 6915, - "##tti": 6916, - "abroad": 6917, - "dramatic": 6918, - "wonderful": 6919, - "thereafter": 6920, - "madrid": 6921, - "component": 6922, - "widespread": 6923, - "##sed": 6924, - "tale": 6925, - "citizen": 6926, - "todd": 6927, - "monday": 6928, - "1886": 6929, - "vancouver": 6930, - 
"overseas": 6931, - "forcing": 6932, - "crying": 6933, - "descent": 6934, - "##ris": 6935, - "discussed": 6936, - "substantial": 6937, - "ranks": 6938, - "regime": 6939, - "1870": 6940, - "provinces": 6941, - "switch": 6942, - "drum": 6943, - "zane": 6944, - "ted": 6945, - "tribes": 6946, - "proof": 6947, - "lp": 6948, - "cream": 6949, - "researchers": 6950, - "volunteer": 6951, - "manor": 6952, - "silk": 6953, - "milan": 6954, - "donated": 6955, - "allies": 6956, - "venture": 6957, - "principle": 6958, - "delivery": 6959, - "enterprise": 6960, - "##ves": 6961, - "##ans": 6962, - "bars": 6963, - "traditionally": 6964, - "witch": 6965, - "reminded": 6966, - "copper": 6967, - "##uk": 6968, - "pete": 6969, - "inter": 6970, - "links": 6971, - "colin": 6972, - "grinned": 6973, - "elsewhere": 6974, - "competitive": 6975, - "frequent": 6976, - "##oy": 6977, - "scream": 6978, - "##hu": 6979, - "tension": 6980, - "texts": 6981, - "submarine": 6982, - "finnish": 6983, - "defending": 6984, - "defend": 6985, - "pat": 6986, - "detail": 6987, - "1884": 6988, - "affiliated": 6989, - "stuart": 6990, - "themes": 6991, - "villa": 6992, - "periods": 6993, - "tool": 6994, - "belgian": 6995, - "ruling": 6996, - "crimes": 6997, - "answers": 6998, - "folded": 6999, - "licensed": 7000, - "resort": 7001, - "demolished": 7002, - "hans": 7003, - "lucy": 7004, - "1881": 7005, - "lion": 7006, - "traded": 7007, - "photographs": 7008, - "writes": 7009, - "craig": 7010, - "##fa": 7011, - "trials": 7012, - "generated": 7013, - "beth": 7014, - "noble": 7015, - "debt": 7016, - "percentage": 7017, - "yorkshire": 7018, - "erected": 7019, - "ss": 7020, - "viewed": 7021, - "grades": 7022, - "confidence": 7023, - "ceased": 7024, - "islam": 7025, - "telephone": 7026, - "retail": 7027, - "##ible": 7028, - "chile": 7029, - "m²": 7030, - "roberts": 7031, - "sixteen": 7032, - "##ich": 7033, - "commented": 7034, - "hampshire": 7035, - "innocent": 7036, - "dual": 7037, - "pounds": 7038, - "checked": 7039, - "regulations": 7040, - "afghanistan": 7041, - "sung": 7042, - "rico": 7043, - "liberty": 7044, - "assets": 7045, - "bigger": 7046, - "options": 7047, - "angels": 7048, - "relegated": 7049, - "tribute": 7050, - "wells": 7051, - "attending": 7052, - "leaf": 7053, - "##yan": 7054, - "butler": 7055, - "romanian": 7056, - "forum": 7057, - "monthly": 7058, - "lisa": 7059, - "patterns": 7060, - "gmina": 7061, - "##tory": 7062, - "madison": 7063, - "hurricane": 7064, - "rev": 7065, - "##ians": 7066, - "bristol": 7067, - "##ula": 7068, - "elite": 7069, - "valuable": 7070, - "disaster": 7071, - "democracy": 7072, - "awareness": 7073, - "germans": 7074, - "freyja": 7075, - "##ins": 7076, - "loop": 7077, - "absolutely": 7078, - "paying": 7079, - "populations": 7080, - "maine": 7081, - "sole": 7082, - "prayer": 7083, - "spencer": 7084, - "releases": 7085, - "doorway": 7086, - "bull": 7087, - "##ani": 7088, - "lover": 7089, - "midnight": 7090, - "conclusion": 7091, - "##sson": 7092, - "thirteen": 7093, - "lily": 7094, - "mediterranean": 7095, - "##lt": 7096, - "nhl": 7097, - "proud": 7098, - "sample": 7099, - "##hill": 7100, - "drummer": 7101, - "guinea": 7102, - "##ova": 7103, - "murphy": 7104, - "climb": 7105, - "##ston": 7106, - "instant": 7107, - "attributed": 7108, - "horn": 7109, - "ain": 7110, - "railways": 7111, - "steven": 7112, - "##ao": 7113, - "autumn": 7114, - "ferry": 7115, - "opponent": 7116, - "root": 7117, - "traveling": 7118, - "secured": 7119, - "corridor": 7120, - "stretched": 7121, - "tales": 7122, - "sheet": 7123, - 
"trinity": 7124, - "cattle": 7125, - "helps": 7126, - "indicates": 7127, - "manhattan": 7128, - "murdered": 7129, - "fitted": 7130, - "1882": 7131, - "gentle": 7132, - "grandmother": 7133, - "mines": 7134, - "shocked": 7135, - "vegas": 7136, - "produces": 7137, - "##light": 7138, - "caribbean": 7139, - "##ou": 7140, - "belong": 7141, - "continuous": 7142, - "desperate": 7143, - "drunk": 7144, - "historically": 7145, - "trio": 7146, - "waved": 7147, - "raf": 7148, - "dealing": 7149, - "nathan": 7150, - "bat": 7151, - "murmured": 7152, - "interrupted": 7153, - "residing": 7154, - "scientist": 7155, - "pioneer": 7156, - "harold": 7157, - "aaron": 7158, - "##net": 7159, - "delta": 7160, - "attempting": 7161, - "minority": 7162, - "mini": 7163, - "believes": 7164, - "chorus": 7165, - "tend": 7166, - "lots": 7167, - "eyed": 7168, - "indoor": 7169, - "load": 7170, - "shots": 7171, - "updated": 7172, - "jail": 7173, - "##llo": 7174, - "concerning": 7175, - "connecting": 7176, - "wealth": 7177, - "##ved": 7178, - "slaves": 7179, - "arrive": 7180, - "rangers": 7181, - "sufficient": 7182, - "rebuilt": 7183, - "##wick": 7184, - "cardinal": 7185, - "flood": 7186, - "muhammad": 7187, - "whenever": 7188, - "relation": 7189, - "runners": 7190, - "moral": 7191, - "repair": 7192, - "viewers": 7193, - "arriving": 7194, - "revenge": 7195, - "punk": 7196, - "assisted": 7197, - "bath": 7198, - "fairly": 7199, - "breathe": 7200, - "lists": 7201, - "innings": 7202, - "illustrated": 7203, - "whisper": 7204, - "nearest": 7205, - "voters": 7206, - "clinton": 7207, - "ties": 7208, - "ultimate": 7209, - "screamed": 7210, - "beijing": 7211, - "lions": 7212, - "andre": 7213, - "fictional": 7214, - "gathering": 7215, - "comfort": 7216, - "radar": 7217, - "suitable": 7218, - "dismissed": 7219, - "hms": 7220, - "ban": 7221, - "pine": 7222, - "wrist": 7223, - "atmosphere": 7224, - "voivodeship": 7225, - "bid": 7226, - "timber": 7227, - "##ned": 7228, - "##nan": 7229, - "giants": 7230, - "##ane": 7231, - "cameron": 7232, - "recovery": 7233, - "uss": 7234, - "identical": 7235, - "categories": 7236, - "switched": 7237, - "serbia": 7238, - "laughter": 7239, - "noah": 7240, - "ensemble": 7241, - "therapy": 7242, - "peoples": 7243, - "touching": 7244, - "##off": 7245, - "locally": 7246, - "pearl": 7247, - "platforms": 7248, - "everywhere": 7249, - "ballet": 7250, - "tables": 7251, - "lanka": 7252, - "herbert": 7253, - "outdoor": 7254, - "toured": 7255, - "derek": 7256, - "1883": 7257, - "spaces": 7258, - "contested": 7259, - "swept": 7260, - "1878": 7261, - "exclusive": 7262, - "slight": 7263, - "connections": 7264, - "##dra": 7265, - "winds": 7266, - "prisoner": 7267, - "collective": 7268, - "bangladesh": 7269, - "tube": 7270, - "publicly": 7271, - "wealthy": 7272, - "thai": 7273, - "##ys": 7274, - "isolated": 7275, - "select": 7276, - "##ric": 7277, - "insisted": 7278, - "pen": 7279, - "fortune": 7280, - "ticket": 7281, - "spotted": 7282, - "reportedly": 7283, - "animation": 7284, - "enforcement": 7285, - "tanks": 7286, - "110": 7287, - "decides": 7288, - "wider": 7289, - "lowest": 7290, - "owen": 7291, - "##time": 7292, - "nod": 7293, - "hitting": 7294, - "##hn": 7295, - "gregory": 7296, - "furthermore": 7297, - "magazines": 7298, - "fighters": 7299, - "solutions": 7300, - "##ery": 7301, - "pointing": 7302, - "requested": 7303, - "peru": 7304, - "reed": 7305, - "chancellor": 7306, - "knights": 7307, - "mask": 7308, - "worker": 7309, - "eldest": 7310, - "flames": 7311, - "reduction": 7312, - "1860": 7313, - "volunteers": 7314, 
- "##tis": 7315, - "reporting": 7316, - "##hl": 7317, - "wire": 7318, - "advisory": 7319, - "endemic": 7320, - "origins": 7321, - "settlers": 7322, - "pursue": 7323, - "knock": 7324, - "consumer": 7325, - "1876": 7326, - "eu": 7327, - "compound": 7328, - "creatures": 7329, - "mansion": 7330, - "sentenced": 7331, - "ivan": 7332, - "deployed": 7333, - "guitars": 7334, - "frowned": 7335, - "involves": 7336, - "mechanism": 7337, - "kilometers": 7338, - "perspective": 7339, - "shops": 7340, - "maps": 7341, - "terminus": 7342, - "duncan": 7343, - "alien": 7344, - "fist": 7345, - "bridges": 7346, - "##pers": 7347, - "heroes": 7348, - "fed": 7349, - "derby": 7350, - "swallowed": 7351, - "##ros": 7352, - "patent": 7353, - "sara": 7354, - "illness": 7355, - "characterized": 7356, - "adventures": 7357, - "slide": 7358, - "hawaii": 7359, - "jurisdiction": 7360, - "##op": 7361, - "organised": 7362, - "##side": 7363, - "adelaide": 7364, - "walks": 7365, - "biology": 7366, - "se": 7367, - "##ties": 7368, - "rogers": 7369, - "swing": 7370, - "tightly": 7371, - "boundaries": 7372, - "##rie": 7373, - "prepare": 7374, - "implementation": 7375, - "stolen": 7376, - "##sha": 7377, - "certified": 7378, - "colombia": 7379, - "edwards": 7380, - "garage": 7381, - "##mm": 7382, - "recalled": 7383, - "##ball": 7384, - "rage": 7385, - "harm": 7386, - "nigeria": 7387, - "breast": 7388, - "##ren": 7389, - "furniture": 7390, - "pupils": 7391, - "settle": 7392, - "##lus": 7393, - "cuba": 7394, - "balls": 7395, - "client": 7396, - "alaska": 7397, - "21st": 7398, - "linear": 7399, - "thrust": 7400, - "celebration": 7401, - "latino": 7402, - "genetic": 7403, - "terror": 7404, - "##cia": 7405, - "##ening": 7406, - "lightning": 7407, - "fee": 7408, - "witness": 7409, - "lodge": 7410, - "establishing": 7411, - "skull": 7412, - "##ique": 7413, - "earning": 7414, - "hood": 7415, - "##ei": 7416, - "rebellion": 7417, - "wang": 7418, - "sporting": 7419, - "warned": 7420, - "missile": 7421, - "devoted": 7422, - "activist": 7423, - "porch": 7424, - "worship": 7425, - "fourteen": 7426, - "package": 7427, - "1871": 7428, - "decorated": 7429, - "##shire": 7430, - "housed": 7431, - "##ock": 7432, - "chess": 7433, - "sailed": 7434, - "doctors": 7435, - "oscar": 7436, - "joan": 7437, - "treat": 7438, - "garcia": 7439, - "harbour": 7440, - "jeremy": 7441, - "##ire": 7442, - "traditions": 7443, - "dominant": 7444, - "jacques": 7445, - "##gon": 7446, - "##wan": 7447, - "relocated": 7448, - "1879": 7449, - "amendment": 7450, - "sized": 7451, - "companion": 7452, - "simultaneously": 7453, - "volleyball": 7454, - "spun": 7455, - "acre": 7456, - "increases": 7457, - "stopping": 7458, - "loves": 7459, - "belongs": 7460, - "affect": 7461, - "drafted": 7462, - "tossed": 7463, - "scout": 7464, - "battles": 7465, - "1875": 7466, - "filming": 7467, - "shoved": 7468, - "munich": 7469, - "tenure": 7470, - "vertical": 7471, - "romance": 7472, - "pc": 7473, - "##cher": 7474, - "argue": 7475, - "##ical": 7476, - "craft": 7477, - "ranging": 7478, - "www": 7479, - "opens": 7480, - "honest": 7481, - "tyler": 7482, - "yesterday": 7483, - "virtual": 7484, - "##let": 7485, - "muslims": 7486, - "reveal": 7487, - "snake": 7488, - "immigrants": 7489, - "radical": 7490, - "screaming": 7491, - "speakers": 7492, - "firing": 7493, - "saving": 7494, - "belonging": 7495, - "ease": 7496, - "lighting": 7497, - "prefecture": 7498, - "blame": 7499, - "farmer": 7500, - "hungry": 7501, - "grows": 7502, - "rubbed": 7503, - "beam": 7504, - "sur": 7505, - "subsidiary": 7506, - 
"##cha": 7507, - "armenian": 7508, - "sao": 7509, - "dropping": 7510, - "conventional": 7511, - "##fer": 7512, - "microsoft": 7513, - "reply": 7514, - "qualify": 7515, - "spots": 7516, - "1867": 7517, - "sweat": 7518, - "festivals": 7519, - "##ken": 7520, - "immigration": 7521, - "physician": 7522, - "discover": 7523, - "exposure": 7524, - "sandy": 7525, - "explanation": 7526, - "isaac": 7527, - "implemented": 7528, - "##fish": 7529, - "hart": 7530, - "initiated": 7531, - "connect": 7532, - "stakes": 7533, - "presents": 7534, - "heights": 7535, - "householder": 7536, - "pleased": 7537, - "tourist": 7538, - "regardless": 7539, - "slip": 7540, - "closest": 7541, - "##ction": 7542, - "surely": 7543, - "sultan": 7544, - "brings": 7545, - "riley": 7546, - "preparation": 7547, - "aboard": 7548, - "slammed": 7549, - "baptist": 7550, - "experiment": 7551, - "ongoing": 7552, - "interstate": 7553, - "organic": 7554, - "playoffs": 7555, - "##ika": 7556, - "1877": 7557, - "130": 7558, - "##tar": 7559, - "hindu": 7560, - "error": 7561, - "tours": 7562, - "tier": 7563, - "plenty": 7564, - "arrangements": 7565, - "talks": 7566, - "trapped": 7567, - "excited": 7568, - "sank": 7569, - "ho": 7570, - "athens": 7571, - "1872": 7572, - "denver": 7573, - "welfare": 7574, - "suburb": 7575, - "athletes": 7576, - "trick": 7577, - "diverse": 7578, - "belly": 7579, - "exclusively": 7580, - "yelled": 7581, - "1868": 7582, - "##med": 7583, - "conversion": 7584, - "##ette": 7585, - "1874": 7586, - "internationally": 7587, - "computers": 7588, - "conductor": 7589, - "abilities": 7590, - "sensitive": 7591, - "hello": 7592, - "dispute": 7593, - "measured": 7594, - "globe": 7595, - "rocket": 7596, - "prices": 7597, - "amsterdam": 7598, - "flights": 7599, - "tigers": 7600, - "inn": 7601, - "municipalities": 7602, - "emotion": 7603, - "references": 7604, - "3d": 7605, - "##mus": 7606, - "explains": 7607, - "airlines": 7608, - "manufactured": 7609, - "pm": 7610, - "archaeological": 7611, - "1873": 7612, - "interpretation": 7613, - "devon": 7614, - "comment": 7615, - "##ites": 7616, - "settlements": 7617, - "kissing": 7618, - "absolute": 7619, - "improvement": 7620, - "suite": 7621, - "impressed": 7622, - "barcelona": 7623, - "sullivan": 7624, - "jefferson": 7625, - "towers": 7626, - "jesse": 7627, - "julie": 7628, - "##tin": 7629, - "##lu": 7630, - "grandson": 7631, - "hi": 7632, - "gauge": 7633, - "regard": 7634, - "rings": 7635, - "interviews": 7636, - "trace": 7637, - "raymond": 7638, - "thumb": 7639, - "departments": 7640, - "burns": 7641, - "serial": 7642, - "bulgarian": 7643, - "scores": 7644, - "demonstrated": 7645, - "##ix": 7646, - "1866": 7647, - "kyle": 7648, - "alberta": 7649, - "underneath": 7650, - "romanized": 7651, - "##ward": 7652, - "relieved": 7653, - "acquisition": 7654, - "phrase": 7655, - "cliff": 7656, - "reveals": 7657, - "han": 7658, - "cuts": 7659, - "merger": 7660, - "custom": 7661, - "##dar": 7662, - "nee": 7663, - "gilbert": 7664, - "graduation": 7665, - "##nts": 7666, - "assessment": 7667, - "cafe": 7668, - "difficulty": 7669, - "demands": 7670, - "swung": 7671, - "democrat": 7672, - "jennifer": 7673, - "commons": 7674, - "1940s": 7675, - "grove": 7676, - "##yo": 7677, - "completing": 7678, - "focuses": 7679, - "sum": 7680, - "substitute": 7681, - "bearing": 7682, - "stretch": 7683, - "reception": 7684, - "##py": 7685, - "reflected": 7686, - "essentially": 7687, - "destination": 7688, - "pairs": 7689, - "##ched": 7690, - "survival": 7691, - "resource": 7692, - "##bach": 7693, - "promoting": 7694, 
- "doubles": 7695, - "messages": 7696, - "tear": 7697, - "##down": 7698, - "##fully": 7699, - "parade": 7700, - "florence": 7701, - "harvey": 7702, - "incumbent": 7703, - "partial": 7704, - "framework": 7705, - "900": 7706, - "pedro": 7707, - "frozen": 7708, - "procedure": 7709, - "olivia": 7710, - "controls": 7711, - "##mic": 7712, - "shelter": 7713, - "personally": 7714, - "temperatures": 7715, - "##od": 7716, - "brisbane": 7717, - "tested": 7718, - "sits": 7719, - "marble": 7720, - "comprehensive": 7721, - "oxygen": 7722, - "leonard": 7723, - "##kov": 7724, - "inaugural": 7725, - "iranian": 7726, - "referring": 7727, - "quarters": 7728, - "attitude": 7729, - "##ivity": 7730, - "mainstream": 7731, - "lined": 7732, - "mars": 7733, - "dakota": 7734, - "norfolk": 7735, - "unsuccessful": 7736, - "##°": 7737, - "explosion": 7738, - "helicopter": 7739, - "congressional": 7740, - "##sing": 7741, - "inspector": 7742, - "bitch": 7743, - "seal": 7744, - "departed": 7745, - "divine": 7746, - "##ters": 7747, - "coaching": 7748, - "examination": 7749, - "punishment": 7750, - "manufacturer": 7751, - "sink": 7752, - "columns": 7753, - "unincorporated": 7754, - "signals": 7755, - "nevada": 7756, - "squeezed": 7757, - "dylan": 7758, - "dining": 7759, - "photos": 7760, - "martial": 7761, - "manuel": 7762, - "eighteen": 7763, - "elevator": 7764, - "brushed": 7765, - "plates": 7766, - "ministers": 7767, - "ivy": 7768, - "congregation": 7769, - "##len": 7770, - "slept": 7771, - "specialized": 7772, - "taxes": 7773, - "curve": 7774, - "restricted": 7775, - "negotiations": 7776, - "likes": 7777, - "statistical": 7778, - "arnold": 7779, - "inspiration": 7780, - "execution": 7781, - "bold": 7782, - "intermediate": 7783, - "significance": 7784, - "margin": 7785, - "ruler": 7786, - "wheels": 7787, - "gothic": 7788, - "intellectual": 7789, - "dependent": 7790, - "listened": 7791, - "eligible": 7792, - "buses": 7793, - "widow": 7794, - "syria": 7795, - "earn": 7796, - "cincinnati": 7797, - "collapsed": 7798, - "recipient": 7799, - "secrets": 7800, - "accessible": 7801, - "philippine": 7802, - "maritime": 7803, - "goddess": 7804, - "clerk": 7805, - "surrender": 7806, - "breaks": 7807, - "playoff": 7808, - "database": 7809, - "##ified": 7810, - "##lon": 7811, - "ideal": 7812, - "beetle": 7813, - "aspect": 7814, - "soap": 7815, - "regulation": 7816, - "strings": 7817, - "expand": 7818, - "anglo": 7819, - "shorter": 7820, - "crosses": 7821, - "retreat": 7822, - "tough": 7823, - "coins": 7824, - "wallace": 7825, - "directions": 7826, - "pressing": 7827, - "##oon": 7828, - "shipping": 7829, - "locomotives": 7830, - "comparison": 7831, - "topics": 7832, - "nephew": 7833, - "##mes": 7834, - "distinction": 7835, - "honors": 7836, - "travelled": 7837, - "sierra": 7838, - "ibn": 7839, - "##over": 7840, - "fortress": 7841, - "sa": 7842, - "recognised": 7843, - "carved": 7844, - "1869": 7845, - "clients": 7846, - "##dan": 7847, - "intent": 7848, - "##mar": 7849, - "coaches": 7850, - "describing": 7851, - "bread": 7852, - "##ington": 7853, - "beaten": 7854, - "northwestern": 7855, - "##ona": 7856, - "merit": 7857, - "youtube": 7858, - "collapse": 7859, - "challenges": 7860, - "em": 7861, - "historians": 7862, - "objective": 7863, - "submitted": 7864, - "virus": 7865, - "attacking": 7866, - "drake": 7867, - "assume": 7868, - "##ere": 7869, - "diseases": 7870, - "marc": 7871, - "stem": 7872, - "leeds": 7873, - "##cus": 7874, - "##ab": 7875, - "farming": 7876, - "glasses": 7877, - "##lock": 7878, - "visits": 7879, - "nowhere": 7880, 
- "fellowship": 7881, - "relevant": 7882, - "carries": 7883, - "restaurants": 7884, - "experiments": 7885, - "101": 7886, - "constantly": 7887, - "bases": 7888, - "targets": 7889, - "shah": 7890, - "tenth": 7891, - "opponents": 7892, - "verse": 7893, - "territorial": 7894, - "##ira": 7895, - "writings": 7896, - "corruption": 7897, - "##hs": 7898, - "instruction": 7899, - "inherited": 7900, - "reverse": 7901, - "emphasis": 7902, - "##vic": 7903, - "employee": 7904, - "arch": 7905, - "keeps": 7906, - "rabbi": 7907, - "watson": 7908, - "payment": 7909, - "uh": 7910, - "##ala": 7911, - "nancy": 7912, - "##tre": 7913, - "venice": 7914, - "fastest": 7915, - "sexy": 7916, - "banned": 7917, - "adrian": 7918, - "properly": 7919, - "ruth": 7920, - "touchdown": 7921, - "dollar": 7922, - "boards": 7923, - "metre": 7924, - "circles": 7925, - "edges": 7926, - "favour": 7927, - "comments": 7928, - "ok": 7929, - "travels": 7930, - "liberation": 7931, - "scattered": 7932, - "firmly": 7933, - "##ular": 7934, - "holland": 7935, - "permitted": 7936, - "diesel": 7937, - "kenya": 7938, - "den": 7939, - "originated": 7940, - "##ral": 7941, - "demons": 7942, - "resumed": 7943, - "dragged": 7944, - "rider": 7945, - "##rus": 7946, - "servant": 7947, - "blinked": 7948, - "extend": 7949, - "torn": 7950, - "##ias": 7951, - "##sey": 7952, - "input": 7953, - "meal": 7954, - "everybody": 7955, - "cylinder": 7956, - "kinds": 7957, - "camps": 7958, - "##fe": 7959, - "bullet": 7960, - "logic": 7961, - "##wn": 7962, - "croatian": 7963, - "evolved": 7964, - "healthy": 7965, - "fool": 7966, - "chocolate": 7967, - "wise": 7968, - "preserve": 7969, - "pradesh": 7970, - "##ess": 7971, - "respective": 7972, - "1850": 7973, - "##ew": 7974, - "chicken": 7975, - "artificial": 7976, - "gross": 7977, - "corresponding": 7978, - "convicted": 7979, - "cage": 7980, - "caroline": 7981, - "dialogue": 7982, - "##dor": 7983, - "narrative": 7984, - "stranger": 7985, - "mario": 7986, - "br": 7987, - "christianity": 7988, - "failing": 7989, - "trent": 7990, - "commanding": 7991, - "buddhist": 7992, - "1848": 7993, - "maurice": 7994, - "focusing": 7995, - "yale": 7996, - "bike": 7997, - "altitude": 7998, - "##ering": 7999, - "mouse": 8000, - "revised": 8001, - "##sley": 8002, - "veteran": 8003, - "##ig": 8004, - "pulls": 8005, - "theology": 8006, - "crashed": 8007, - "campaigns": 8008, - "legion": 8009, - "##ability": 8010, - "drag": 8011, - "excellence": 8012, - "customer": 8013, - "cancelled": 8014, - "intensity": 8015, - "excuse": 8016, - "##lar": 8017, - "liga": 8018, - "participating": 8019, - "contributing": 8020, - "printing": 8021, - "##burn": 8022, - "variable": 8023, - "##rk": 8024, - "curious": 8025, - "bin": 8026, - "legacy": 8027, - "renaissance": 8028, - "##my": 8029, - "symptoms": 8030, - "binding": 8031, - "vocalist": 8032, - "dancer": 8033, - "##nie": 8034, - "grammar": 8035, - "gospel": 8036, - "democrats": 8037, - "ya": 8038, - "enters": 8039, - "sc": 8040, - "diplomatic": 8041, - "hitler": 8042, - "##ser": 8043, - "clouds": 8044, - "mathematical": 8045, - "quit": 8046, - "defended": 8047, - "oriented": 8048, - "##heim": 8049, - "fundamental": 8050, - "hardware": 8051, - "impressive": 8052, - "equally": 8053, - "convince": 8054, - "confederate": 8055, - "guilt": 8056, - "chuck": 8057, - "sliding": 8058, - "##ware": 8059, - "magnetic": 8060, - "narrowed": 8061, - "petersburg": 8062, - "bulgaria": 8063, - "otto": 8064, - "phd": 8065, - "skill": 8066, - "##ama": 8067, - "reader": 8068, - "hopes": 8069, - "pitcher": 8070, - 
"reservoir": 8071, - "hearts": 8072, - "automatically": 8073, - "expecting": 8074, - "mysterious": 8075, - "bennett": 8076, - "extensively": 8077, - "imagined": 8078, - "seeds": 8079, - "monitor": 8080, - "fix": 8081, - "##ative": 8082, - "journalism": 8083, - "struggling": 8084, - "signature": 8085, - "ranch": 8086, - "encounter": 8087, - "photographer": 8088, - "observation": 8089, - "protests": 8090, - "##pin": 8091, - "influences": 8092, - "##hr": 8093, - "calendar": 8094, - "##all": 8095, - "cruz": 8096, - "croatia": 8097, - "locomotive": 8098, - "hughes": 8099, - "naturally": 8100, - "shakespeare": 8101, - "basement": 8102, - "hook": 8103, - "uncredited": 8104, - "faded": 8105, - "theories": 8106, - "approaches": 8107, - "dare": 8108, - "phillips": 8109, - "filling": 8110, - "fury": 8111, - "obama": 8112, - "##ain": 8113, - "efficient": 8114, - "arc": 8115, - "deliver": 8116, - "min": 8117, - "raid": 8118, - "breeding": 8119, - "inducted": 8120, - "leagues": 8121, - "efficiency": 8122, - "axis": 8123, - "montana": 8124, - "eagles": 8125, - "##ked": 8126, - "supplied": 8127, - "instructions": 8128, - "karen": 8129, - "picking": 8130, - "indicating": 8131, - "trap": 8132, - "anchor": 8133, - "practically": 8134, - "christians": 8135, - "tomb": 8136, - "vary": 8137, - "occasional": 8138, - "electronics": 8139, - "lords": 8140, - "readers": 8141, - "newcastle": 8142, - "faint": 8143, - "innovation": 8144, - "collect": 8145, - "situations": 8146, - "engagement": 8147, - "160": 8148, - "claude": 8149, - "mixture": 8150, - "##feld": 8151, - "peer": 8152, - "tissue": 8153, - "logo": 8154, - "lean": 8155, - "##ration": 8156, - "°f": 8157, - "floors": 8158, - "##ven": 8159, - "architects": 8160, - "reducing": 8161, - "##our": 8162, - "##ments": 8163, - "rope": 8164, - "1859": 8165, - "ottawa": 8166, - "##har": 8167, - "samples": 8168, - "banking": 8169, - "declaration": 8170, - "proteins": 8171, - "resignation": 8172, - "francois": 8173, - "saudi": 8174, - "advocate": 8175, - "exhibited": 8176, - "armor": 8177, - "twins": 8178, - "divorce": 8179, - "##ras": 8180, - "abraham": 8181, - "reviewed": 8182, - "jo": 8183, - "temporarily": 8184, - "matrix": 8185, - "physically": 8186, - "pulse": 8187, - "curled": 8188, - "##ena": 8189, - "difficulties": 8190, - "bengal": 8191, - "usage": 8192, - "##ban": 8193, - "annie": 8194, - "riders": 8195, - "certificate": 8196, - "##pi": 8197, - "holes": 8198, - "warsaw": 8199, - "distinctive": 8200, - "jessica": 8201, - "##mon": 8202, - "mutual": 8203, - "1857": 8204, - "customs": 8205, - "circular": 8206, - "eugene": 8207, - "removal": 8208, - "loaded": 8209, - "mere": 8210, - "vulnerable": 8211, - "depicted": 8212, - "generations": 8213, - "dame": 8214, - "heir": 8215, - "enormous": 8216, - "lightly": 8217, - "climbing": 8218, - "pitched": 8219, - "lessons": 8220, - "pilots": 8221, - "nepal": 8222, - "ram": 8223, - "google": 8224, - "preparing": 8225, - "brad": 8226, - "louise": 8227, - "renowned": 8228, - "##₂": 8229, - "liam": 8230, - "##ably": 8231, - "plaza": 8232, - "shaw": 8233, - "sophie": 8234, - "brilliant": 8235, - "bills": 8236, - "##bar": 8237, - "##nik": 8238, - "fucking": 8239, - "mainland": 8240, - "server": 8241, - "pleasant": 8242, - "seized": 8243, - "veterans": 8244, - "jerked": 8245, - "fail": 8246, - "beta": 8247, - "brush": 8248, - "radiation": 8249, - "stored": 8250, - "warmth": 8251, - "southeastern": 8252, - "nate": 8253, - "sin": 8254, - "raced": 8255, - "berkeley": 8256, - "joke": 8257, - "athlete": 8258, - "designation": 8259, - 
"trunk": 8260, - "##low": 8261, - "roland": 8262, - "qualification": 8263, - "archives": 8264, - "heels": 8265, - "artwork": 8266, - "receives": 8267, - "judicial": 8268, - "reserves": 8269, - "##bed": 8270, - "woke": 8271, - "installation": 8272, - "abu": 8273, - "floating": 8274, - "fake": 8275, - "lesser": 8276, - "excitement": 8277, - "interface": 8278, - "concentrated": 8279, - "addressed": 8280, - "characteristic": 8281, - "amanda": 8282, - "saxophone": 8283, - "monk": 8284, - "auto": 8285, - "##bus": 8286, - "releasing": 8287, - "egg": 8288, - "dies": 8289, - "interaction": 8290, - "defender": 8291, - "ce": 8292, - "outbreak": 8293, - "glory": 8294, - "loving": 8295, - "##bert": 8296, - "sequel": 8297, - "consciousness": 8298, - "http": 8299, - "awake": 8300, - "ski": 8301, - "enrolled": 8302, - "##ress": 8303, - "handling": 8304, - "rookie": 8305, - "brow": 8306, - "somebody": 8307, - "biography": 8308, - "warfare": 8309, - "amounts": 8310, - "contracts": 8311, - "presentation": 8312, - "fabric": 8313, - "dissolved": 8314, - "challenged": 8315, - "meter": 8316, - "psychological": 8317, - "lt": 8318, - "elevated": 8319, - "rally": 8320, - "accurate": 8321, - "##tha": 8322, - "hospitals": 8323, - "undergraduate": 8324, - "specialist": 8325, - "venezuela": 8326, - "exhibit": 8327, - "shed": 8328, - "nursing": 8329, - "protestant": 8330, - "fluid": 8331, - "structural": 8332, - "footage": 8333, - "jared": 8334, - "consistent": 8335, - "prey": 8336, - "##ska": 8337, - "succession": 8338, - "reflect": 8339, - "exile": 8340, - "lebanon": 8341, - "wiped": 8342, - "suspect": 8343, - "shanghai": 8344, - "resting": 8345, - "integration": 8346, - "preservation": 8347, - "marvel": 8348, - "variant": 8349, - "pirates": 8350, - "sheep": 8351, - "rounded": 8352, - "capita": 8353, - "sailing": 8354, - "colonies": 8355, - "manuscript": 8356, - "deemed": 8357, - "variations": 8358, - "clarke": 8359, - "functional": 8360, - "emerging": 8361, - "boxing": 8362, - "relaxed": 8363, - "curse": 8364, - "azerbaijan": 8365, - "heavyweight": 8366, - "nickname": 8367, - "editorial": 8368, - "rang": 8369, - "grid": 8370, - "tightened": 8371, - "earthquake": 8372, - "flashed": 8373, - "miguel": 8374, - "rushing": 8375, - "##ches": 8376, - "improvements": 8377, - "boxes": 8378, - "brooks": 8379, - "180": 8380, - "consumption": 8381, - "molecular": 8382, - "felix": 8383, - "societies": 8384, - "repeatedly": 8385, - "variation": 8386, - "aids": 8387, - "civic": 8388, - "graphics": 8389, - "professionals": 8390, - "realm": 8391, - "autonomous": 8392, - "receiver": 8393, - "delayed": 8394, - "workshop": 8395, - "militia": 8396, - "chairs": 8397, - "trump": 8398, - "canyon": 8399, - "##point": 8400, - "harsh": 8401, - "extending": 8402, - "lovely": 8403, - "happiness": 8404, - "##jan": 8405, - "stake": 8406, - "eyebrows": 8407, - "embassy": 8408, - "wellington": 8409, - "hannah": 8410, - "##ella": 8411, - "sony": 8412, - "corners": 8413, - "bishops": 8414, - "swear": 8415, - "cloth": 8416, - "contents": 8417, - "xi": 8418, - "namely": 8419, - "commenced": 8420, - "1854": 8421, - "stanford": 8422, - "nashville": 8423, - "courage": 8424, - "graphic": 8425, - "commitment": 8426, - "garrison": 8427, - "##bin": 8428, - "hamlet": 8429, - "clearing": 8430, - "rebels": 8431, - "attraction": 8432, - "literacy": 8433, - "cooking": 8434, - "ruins": 8435, - "temples": 8436, - "jenny": 8437, - "humanity": 8438, - "celebrate": 8439, - "hasn": 8440, - "freight": 8441, - "sixty": 8442, - "rebel": 8443, - "bastard": 8444, - "##art": 
8445, - "newton": 8446, - "##ada": 8447, - "deer": 8448, - "##ges": 8449, - "##ching": 8450, - "smiles": 8451, - "delaware": 8452, - "singers": 8453, - "##ets": 8454, - "approaching": 8455, - "assists": 8456, - "flame": 8457, - "##ph": 8458, - "boulevard": 8459, - "barrel": 8460, - "planted": 8461, - "##ome": 8462, - "pursuit": 8463, - "##sia": 8464, - "consequences": 8465, - "posts": 8466, - "shallow": 8467, - "invitation": 8468, - "rode": 8469, - "depot": 8470, - "ernest": 8471, - "kane": 8472, - "rod": 8473, - "concepts": 8474, - "preston": 8475, - "topic": 8476, - "chambers": 8477, - "striking": 8478, - "blast": 8479, - "arrives": 8480, - "descendants": 8481, - "montgomery": 8482, - "ranges": 8483, - "worlds": 8484, - "##lay": 8485, - "##ari": 8486, - "span": 8487, - "chaos": 8488, - "praise": 8489, - "##ag": 8490, - "fewer": 8491, - "1855": 8492, - "sanctuary": 8493, - "mud": 8494, - "fbi": 8495, - "##ions": 8496, - "programmes": 8497, - "maintaining": 8498, - "unity": 8499, - "harper": 8500, - "bore": 8501, - "handsome": 8502, - "closure": 8503, - "tournaments": 8504, - "thunder": 8505, - "nebraska": 8506, - "linda": 8507, - "facade": 8508, - "puts": 8509, - "satisfied": 8510, - "argentine": 8511, - "dale": 8512, - "cork": 8513, - "dome": 8514, - "panama": 8515, - "##yl": 8516, - "1858": 8517, - "tasks": 8518, - "experts": 8519, - "##ates": 8520, - "feeding": 8521, - "equation": 8522, - "##las": 8523, - "##ida": 8524, - "##tu": 8525, - "engage": 8526, - "bryan": 8527, - "##ax": 8528, - "um": 8529, - "quartet": 8530, - "melody": 8531, - "disbanded": 8532, - "sheffield": 8533, - "blocked": 8534, - "gasped": 8535, - "delay": 8536, - "kisses": 8537, - "maggie": 8538, - "connects": 8539, - "##non": 8540, - "sts": 8541, - "poured": 8542, - "creator": 8543, - "publishers": 8544, - "##we": 8545, - "guided": 8546, - "ellis": 8547, - "extinct": 8548, - "hug": 8549, - "gaining": 8550, - "##ord": 8551, - "complicated": 8552, - "##bility": 8553, - "poll": 8554, - "clenched": 8555, - "investigate": 8556, - "##use": 8557, - "thereby": 8558, - "quantum": 8559, - "spine": 8560, - "cdp": 8561, - "humor": 8562, - "kills": 8563, - "administered": 8564, - "semifinals": 8565, - "##du": 8566, - "encountered": 8567, - "ignore": 8568, - "##bu": 8569, - "commentary": 8570, - "##maker": 8571, - "bother": 8572, - "roosevelt": 8573, - "140": 8574, - "plains": 8575, - "halfway": 8576, - "flowing": 8577, - "cultures": 8578, - "crack": 8579, - "imprisoned": 8580, - "neighboring": 8581, - "airline": 8582, - "##ses": 8583, - "##view": 8584, - "##mate": 8585, - "##ec": 8586, - "gather": 8587, - "wolves": 8588, - "marathon": 8589, - "transformed": 8590, - "##ill": 8591, - "cruise": 8592, - "organisations": 8593, - "carol": 8594, - "punch": 8595, - "exhibitions": 8596, - "numbered": 8597, - "alarm": 8598, - "ratings": 8599, - "daddy": 8600, - "silently": 8601, - "##stein": 8602, - "queens": 8603, - "colours": 8604, - "impression": 8605, - "guidance": 8606, - "liu": 8607, - "tactical": 8608, - "##rat": 8609, - "marshal": 8610, - "della": 8611, - "arrow": 8612, - "##ings": 8613, - "rested": 8614, - "feared": 8615, - "tender": 8616, - "owns": 8617, - "bitter": 8618, - "advisor": 8619, - "escort": 8620, - "##ides": 8621, - "spare": 8622, - "farms": 8623, - "grants": 8624, - "##ene": 8625, - "dragons": 8626, - "encourage": 8627, - "colleagues": 8628, - "cameras": 8629, - "##und": 8630, - "sucked": 8631, - "pile": 8632, - "spirits": 8633, - "prague": 8634, - "statements": 8635, - "suspension": 8636, - "landmark": 8637, - 
"fence": 8638, - "torture": 8639, - "recreation": 8640, - "bags": 8641, - "permanently": 8642, - "survivors": 8643, - "pond": 8644, - "spy": 8645, - "predecessor": 8646, - "bombing": 8647, - "coup": 8648, - "##og": 8649, - "protecting": 8650, - "transformation": 8651, - "glow": 8652, - "##lands": 8653, - "##book": 8654, - "dug": 8655, - "priests": 8656, - "andrea": 8657, - "feat": 8658, - "barn": 8659, - "jumping": 8660, - "##chen": 8661, - "##ologist": 8662, - "##con": 8663, - "casualties": 8664, - "stern": 8665, - "auckland": 8666, - "pipe": 8667, - "serie": 8668, - "revealing": 8669, - "ba": 8670, - "##bel": 8671, - "trevor": 8672, - "mercy": 8673, - "spectrum": 8674, - "yang": 8675, - "consist": 8676, - "governing": 8677, - "collaborated": 8678, - "possessed": 8679, - "epic": 8680, - "comprises": 8681, - "blew": 8682, - "shane": 8683, - "##ack": 8684, - "lopez": 8685, - "honored": 8686, - "magical": 8687, - "sacrifice": 8688, - "judgment": 8689, - "perceived": 8690, - "hammer": 8691, - "mtv": 8692, - "baronet": 8693, - "tune": 8694, - "das": 8695, - "missionary": 8696, - "sheets": 8697, - "350": 8698, - "neutral": 8699, - "oral": 8700, - "threatening": 8701, - "attractive": 8702, - "shade": 8703, - "aims": 8704, - "seminary": 8705, - "##master": 8706, - "estates": 8707, - "1856": 8708, - "michel": 8709, - "wounds": 8710, - "refugees": 8711, - "manufacturers": 8712, - "##nic": 8713, - "mercury": 8714, - "syndrome": 8715, - "porter": 8716, - "##iya": 8717, - "##din": 8718, - "hamburg": 8719, - "identification": 8720, - "upstairs": 8721, - "purse": 8722, - "widened": 8723, - "pause": 8724, - "cared": 8725, - "breathed": 8726, - "affiliate": 8727, - "santiago": 8728, - "prevented": 8729, - "celtic": 8730, - "fisher": 8731, - "125": 8732, - "recruited": 8733, - "byzantine": 8734, - "reconstruction": 8735, - "farther": 8736, - "##mp": 8737, - "diet": 8738, - "sake": 8739, - "au": 8740, - "spite": 8741, - "sensation": 8742, - "##ert": 8743, - "blank": 8744, - "separation": 8745, - "105": 8746, - "##hon": 8747, - "vladimir": 8748, - "armies": 8749, - "anime": 8750, - "##lie": 8751, - "accommodate": 8752, - "orbit": 8753, - "cult": 8754, - "sofia": 8755, - "archive": 8756, - "##ify": 8757, - "##box": 8758, - "founders": 8759, - "sustained": 8760, - "disorder": 8761, - "honours": 8762, - "northeastern": 8763, - "mia": 8764, - "crops": 8765, - "violet": 8766, - "threats": 8767, - "blanket": 8768, - "fires": 8769, - "canton": 8770, - "followers": 8771, - "southwestern": 8772, - "prototype": 8773, - "voyage": 8774, - "assignment": 8775, - "altered": 8776, - "moderate": 8777, - "protocol": 8778, - "pistol": 8779, - "##eo": 8780, - "questioned": 8781, - "brass": 8782, - "lifting": 8783, - "1852": 8784, - "math": 8785, - "authored": 8786, - "##ual": 8787, - "doug": 8788, - "dimensional": 8789, - "dynamic": 8790, - "##san": 8791, - "1851": 8792, - "pronounced": 8793, - "grateful": 8794, - "quest": 8795, - "uncomfortable": 8796, - "boom": 8797, - "presidency": 8798, - "stevens": 8799, - "relating": 8800, - "politicians": 8801, - "chen": 8802, - "barrier": 8803, - "quinn": 8804, - "diana": 8805, - "mosque": 8806, - "tribal": 8807, - "cheese": 8808, - "palmer": 8809, - "portions": 8810, - "sometime": 8811, - "chester": 8812, - "treasure": 8813, - "wu": 8814, - "bend": 8815, - "download": 8816, - "millions": 8817, - "reforms": 8818, - "registration": 8819, - "##osa": 8820, - "consequently": 8821, - "monitoring": 8822, - "ate": 8823, - "preliminary": 8824, - "brandon": 8825, - "invented": 8826, - "ps": 
8827, - "eaten": 8828, - "exterior": 8829, - "intervention": 8830, - "ports": 8831, - "documented": 8832, - "log": 8833, - "displays": 8834, - "lecture": 8835, - "sally": 8836, - "favourite": 8837, - "##itz": 8838, - "vermont": 8839, - "lo": 8840, - "invisible": 8841, - "isle": 8842, - "breed": 8843, - "##ator": 8844, - "journalists": 8845, - "relay": 8846, - "speaks": 8847, - "backward": 8848, - "explore": 8849, - "midfielder": 8850, - "actively": 8851, - "stefan": 8852, - "procedures": 8853, - "cannon": 8854, - "blond": 8855, - "kenneth": 8856, - "centered": 8857, - "servants": 8858, - "chains": 8859, - "libraries": 8860, - "malcolm": 8861, - "essex": 8862, - "henri": 8863, - "slavery": 8864, - "##hal": 8865, - "facts": 8866, - "fairy": 8867, - "coached": 8868, - "cassie": 8869, - "cats": 8870, - "washed": 8871, - "cop": 8872, - "##fi": 8873, - "announcement": 8874, - "item": 8875, - "2000s": 8876, - "vinyl": 8877, - "activated": 8878, - "marco": 8879, - "frontier": 8880, - "growled": 8881, - "curriculum": 8882, - "##das": 8883, - "loyal": 8884, - "accomplished": 8885, - "leslie": 8886, - "ritual": 8887, - "kenny": 8888, - "##00": 8889, - "vii": 8890, - "napoleon": 8891, - "hollow": 8892, - "hybrid": 8893, - "jungle": 8894, - "stationed": 8895, - "friedrich": 8896, - "counted": 8897, - "##ulated": 8898, - "platinum": 8899, - "theatrical": 8900, - "seated": 8901, - "col": 8902, - "rubber": 8903, - "glen": 8904, - "1840": 8905, - "diversity": 8906, - "healing": 8907, - "extends": 8908, - "id": 8909, - "provisions": 8910, - "administrator": 8911, - "columbus": 8912, - "##oe": 8913, - "tributary": 8914, - "te": 8915, - "assured": 8916, - "org": 8917, - "##uous": 8918, - "prestigious": 8919, - "examined": 8920, - "lectures": 8921, - "grammy": 8922, - "ronald": 8923, - "associations": 8924, - "bailey": 8925, - "allan": 8926, - "essays": 8927, - "flute": 8928, - "believing": 8929, - "consultant": 8930, - "proceedings": 8931, - "travelling": 8932, - "1853": 8933, - "kit": 8934, - "kerala": 8935, - "yugoslavia": 8936, - "buddy": 8937, - "methodist": 8938, - "##ith": 8939, - "burial": 8940, - "centres": 8941, - "batman": 8942, - "##nda": 8943, - "discontinued": 8944, - "bo": 8945, - "dock": 8946, - "stockholm": 8947, - "lungs": 8948, - "severely": 8949, - "##nk": 8950, - "citing": 8951, - "manga": 8952, - "##ugh": 8953, - "steal": 8954, - "mumbai": 8955, - "iraqi": 8956, - "robot": 8957, - "celebrity": 8958, - "bride": 8959, - "broadcasts": 8960, - "abolished": 8961, - "pot": 8962, - "joel": 8963, - "overhead": 8964, - "franz": 8965, - "packed": 8966, - "reconnaissance": 8967, - "johann": 8968, - "acknowledged": 8969, - "introduce": 8970, - "handled": 8971, - "doctorate": 8972, - "developments": 8973, - "drinks": 8974, - "alley": 8975, - "palestine": 8976, - "##nis": 8977, - "##aki": 8978, - "proceeded": 8979, - "recover": 8980, - "bradley": 8981, - "grain": 8982, - "patch": 8983, - "afford": 8984, - "infection": 8985, - "nationalist": 8986, - "legendary": 8987, - "##ath": 8988, - "interchange": 8989, - "virtually": 8990, - "gen": 8991, - "gravity": 8992, - "exploration": 8993, - "amber": 8994, - "vital": 8995, - "wishes": 8996, - "powell": 8997, - "doctrine": 8998, - "elbow": 8999, - "screenplay": 9000, - "##bird": 9001, - "contribute": 9002, - "indonesian": 9003, - "pet": 9004, - "creates": 9005, - "##com": 9006, - "enzyme": 9007, - "kylie": 9008, - "discipline": 9009, - "drops": 9010, - "manila": 9011, - "hunger": 9012, - "##ien": 9013, - "layers": 9014, - "suffer": 9015, - "fever": 9016, - 
"bits": 9017, - "monica": 9018, - "keyboard": 9019, - "manages": 9020, - "##hood": 9021, - "searched": 9022, - "appeals": 9023, - "##bad": 9024, - "testament": 9025, - "grande": 9026, - "reid": 9027, - "##war": 9028, - "beliefs": 9029, - "congo": 9030, - "##ification": 9031, - "##dia": 9032, - "si": 9033, - "requiring": 9034, - "##via": 9035, - "casey": 9036, - "1849": 9037, - "regret": 9038, - "streak": 9039, - "rape": 9040, - "depends": 9041, - "syrian": 9042, - "sprint": 9043, - "pound": 9044, - "tourists": 9045, - "upcoming": 9046, - "pub": 9047, - "##xi": 9048, - "tense": 9049, - "##els": 9050, - "practiced": 9051, - "echo": 9052, - "nationwide": 9053, - "guild": 9054, - "motorcycle": 9055, - "liz": 9056, - "##zar": 9057, - "chiefs": 9058, - "desired": 9059, - "elena": 9060, - "bye": 9061, - "precious": 9062, - "absorbed": 9063, - "relatives": 9064, - "booth": 9065, - "pianist": 9066, - "##mal": 9067, - "citizenship": 9068, - "exhausted": 9069, - "wilhelm": 9070, - "##ceae": 9071, - "##hed": 9072, - "noting": 9073, - "quarterback": 9074, - "urge": 9075, - "hectares": 9076, - "##gue": 9077, - "ace": 9078, - "holly": 9079, - "##tal": 9080, - "blonde": 9081, - "davies": 9082, - "parked": 9083, - "sustainable": 9084, - "stepping": 9085, - "twentieth": 9086, - "airfield": 9087, - "galaxy": 9088, - "nest": 9089, - "chip": 9090, - "##nell": 9091, - "tan": 9092, - "shaft": 9093, - "paulo": 9094, - "requirement": 9095, - "##zy": 9096, - "paradise": 9097, - "tobacco": 9098, - "trans": 9099, - "renewed": 9100, - "vietnamese": 9101, - "##cker": 9102, - "##ju": 9103, - "suggesting": 9104, - "catching": 9105, - "holmes": 9106, - "enjoying": 9107, - "md": 9108, - "trips": 9109, - "colt": 9110, - "holder": 9111, - "butterfly": 9112, - "nerve": 9113, - "reformed": 9114, - "cherry": 9115, - "bowling": 9116, - "trailer": 9117, - "carriage": 9118, - "goodbye": 9119, - "appreciate": 9120, - "toy": 9121, - "joshua": 9122, - "interactive": 9123, - "enabled": 9124, - "involve": 9125, - "##kan": 9126, - "collar": 9127, - "determination": 9128, - "bunch": 9129, - "facebook": 9130, - "recall": 9131, - "shorts": 9132, - "superintendent": 9133, - "episcopal": 9134, - "frustration": 9135, - "giovanni": 9136, - "nineteenth": 9137, - "laser": 9138, - "privately": 9139, - "array": 9140, - "circulation": 9141, - "##ovic": 9142, - "armstrong": 9143, - "deals": 9144, - "painful": 9145, - "permit": 9146, - "discrimination": 9147, - "##wi": 9148, - "aires": 9149, - "retiring": 9150, - "cottage": 9151, - "ni": 9152, - "##sta": 9153, - "horizon": 9154, - "ellen": 9155, - "jamaica": 9156, - "ripped": 9157, - "fernando": 9158, - "chapters": 9159, - "playstation": 9160, - "patron": 9161, - "lecturer": 9162, - "navigation": 9163, - "behaviour": 9164, - "genes": 9165, - "georgian": 9166, - "export": 9167, - "solomon": 9168, - "rivals": 9169, - "swift": 9170, - "seventeen": 9171, - "rodriguez": 9172, - "princeton": 9173, - "independently": 9174, - "sox": 9175, - "1847": 9176, - "arguing": 9177, - "entity": 9178, - "casting": 9179, - "hank": 9180, - "criteria": 9181, - "oakland": 9182, - "geographic": 9183, - "milwaukee": 9184, - "reflection": 9185, - "expanding": 9186, - "conquest": 9187, - "dubbed": 9188, - "##tv": 9189, - "halt": 9190, - "brave": 9191, - "brunswick": 9192, - "doi": 9193, - "arched": 9194, - "curtis": 9195, - "divorced": 9196, - "predominantly": 9197, - "somerset": 9198, - "streams": 9199, - "ugly": 9200, - "zoo": 9201, - "horrible": 9202, - "curved": 9203, - "buenos": 9204, - "fierce": 9205, - "dictionary": 
9206, - "vector": 9207, - "theological": 9208, - "unions": 9209, - "handful": 9210, - "stability": 9211, - "chan": 9212, - "punjab": 9213, - "segments": 9214, - "##lly": 9215, - "altar": 9216, - "ignoring": 9217, - "gesture": 9218, - "monsters": 9219, - "pastor": 9220, - "##stone": 9221, - "thighs": 9222, - "unexpected": 9223, - "operators": 9224, - "abruptly": 9225, - "coin": 9226, - "compiled": 9227, - "associates": 9228, - "improving": 9229, - "migration": 9230, - "pin": 9231, - "##ose": 9232, - "compact": 9233, - "collegiate": 9234, - "reserved": 9235, - "##urs": 9236, - "quarterfinals": 9237, - "roster": 9238, - "restore": 9239, - "assembled": 9240, - "hurry": 9241, - "oval": 9242, - "##cies": 9243, - "1846": 9244, - "flags": 9245, - "martha": 9246, - "##del": 9247, - "victories": 9248, - "sharply": 9249, - "##rated": 9250, - "argues": 9251, - "deadly": 9252, - "neo": 9253, - "drawings": 9254, - "symbols": 9255, - "performer": 9256, - "##iel": 9257, - "griffin": 9258, - "restrictions": 9259, - "editing": 9260, - "andrews": 9261, - "java": 9262, - "journals": 9263, - "arabia": 9264, - "compositions": 9265, - "dee": 9266, - "pierce": 9267, - "removing": 9268, - "hindi": 9269, - "casino": 9270, - "runway": 9271, - "civilians": 9272, - "minds": 9273, - "nasa": 9274, - "hotels": 9275, - "##zation": 9276, - "refuge": 9277, - "rent": 9278, - "retain": 9279, - "potentially": 9280, - "conferences": 9281, - "suburban": 9282, - "conducting": 9283, - "##tto": 9284, - "##tions": 9285, - "##tle": 9286, - "descended": 9287, - "massacre": 9288, - "##cal": 9289, - "ammunition": 9290, - "terrain": 9291, - "fork": 9292, - "souls": 9293, - "counts": 9294, - "chelsea": 9295, - "durham": 9296, - "drives": 9297, - "cab": 9298, - "##bank": 9299, - "perth": 9300, - "realizing": 9301, - "palestinian": 9302, - "finn": 9303, - "simpson": 9304, - "##dal": 9305, - "betty": 9306, - "##ule": 9307, - "moreover": 9308, - "particles": 9309, - "cardinals": 9310, - "tent": 9311, - "evaluation": 9312, - "extraordinary": 9313, - "##oid": 9314, - "inscription": 9315, - "##works": 9316, - "wednesday": 9317, - "chloe": 9318, - "maintains": 9319, - "panels": 9320, - "ashley": 9321, - "trucks": 9322, - "##nation": 9323, - "cluster": 9324, - "sunlight": 9325, - "strikes": 9326, - "zhang": 9327, - "##wing": 9328, - "dialect": 9329, - "canon": 9330, - "##ap": 9331, - "tucked": 9332, - "##ws": 9333, - "collecting": 9334, - "##mas": 9335, - "##can": 9336, - "##sville": 9337, - "maker": 9338, - "quoted": 9339, - "evan": 9340, - "franco": 9341, - "aria": 9342, - "buying": 9343, - "cleaning": 9344, - "eva": 9345, - "closet": 9346, - "provision": 9347, - "apollo": 9348, - "clinic": 9349, - "rat": 9350, - "##ez": 9351, - "necessarily": 9352, - "ac": 9353, - "##gle": 9354, - "##ising": 9355, - "venues": 9356, - "flipped": 9357, - "cent": 9358, - "spreading": 9359, - "trustees": 9360, - "checking": 9361, - "authorized": 9362, - "##sco": 9363, - "disappointed": 9364, - "##ado": 9365, - "notion": 9366, - "duration": 9367, - "trumpet": 9368, - "hesitated": 9369, - "topped": 9370, - "brussels": 9371, - "rolls": 9372, - "theoretical": 9373, - "hint": 9374, - "define": 9375, - "aggressive": 9376, - "repeat": 9377, - "wash": 9378, - "peaceful": 9379, - "optical": 9380, - "width": 9381, - "allegedly": 9382, - "mcdonald": 9383, - "strict": 9384, - "copyright": 9385, - "##illa": 9386, - "investors": 9387, - "mar": 9388, - "jam": 9389, - "witnesses": 9390, - "sounding": 9391, - "miranda": 9392, - "michelle": 9393, - "privacy": 9394, - "hugo": 9395, - 
"harmony": 9396, - "##pp": 9397, - "valid": 9398, - "lynn": 9399, - "glared": 9400, - "nina": 9401, - "102": 9402, - "headquartered": 9403, - "diving": 9404, - "boarding": 9405, - "gibson": 9406, - "##ncy": 9407, - "albanian": 9408, - "marsh": 9409, - "routine": 9410, - "dealt": 9411, - "enhanced": 9412, - "er": 9413, - "intelligent": 9414, - "substance": 9415, - "targeted": 9416, - "enlisted": 9417, - "discovers": 9418, - "spinning": 9419, - "observations": 9420, - "pissed": 9421, - "smoking": 9422, - "rebecca": 9423, - "capitol": 9424, - "visa": 9425, - "varied": 9426, - "costume": 9427, - "seemingly": 9428, - "indies": 9429, - "compensation": 9430, - "surgeon": 9431, - "thursday": 9432, - "arsenal": 9433, - "westminster": 9434, - "suburbs": 9435, - "rid": 9436, - "anglican": 9437, - "##ridge": 9438, - "knots": 9439, - "foods": 9440, - "alumni": 9441, - "lighter": 9442, - "fraser": 9443, - "whoever": 9444, - "portal": 9445, - "scandal": 9446, - "##ray": 9447, - "gavin": 9448, - "advised": 9449, - "instructor": 9450, - "flooding": 9451, - "terrorist": 9452, - "##ale": 9453, - "teenage": 9454, - "interim": 9455, - "senses": 9456, - "duck": 9457, - "teen": 9458, - "thesis": 9459, - "abby": 9460, - "eager": 9461, - "overcome": 9462, - "##ile": 9463, - "newport": 9464, - "glenn": 9465, - "rises": 9466, - "shame": 9467, - "##cc": 9468, - "prompted": 9469, - "priority": 9470, - "forgot": 9471, - "bomber": 9472, - "nicolas": 9473, - "protective": 9474, - "360": 9475, - "cartoon": 9476, - "katherine": 9477, - "breeze": 9478, - "lonely": 9479, - "trusted": 9480, - "henderson": 9481, - "richardson": 9482, - "relax": 9483, - "banner": 9484, - "candy": 9485, - "palms": 9486, - "remarkable": 9487, - "##rio": 9488, - "legends": 9489, - "cricketer": 9490, - "essay": 9491, - "ordained": 9492, - "edmund": 9493, - "rifles": 9494, - "trigger": 9495, - "##uri": 9496, - "##away": 9497, - "sail": 9498, - "alert": 9499, - "1830": 9500, - "audiences": 9501, - "penn": 9502, - "sussex": 9503, - "siblings": 9504, - "pursued": 9505, - "indianapolis": 9506, - "resist": 9507, - "rosa": 9508, - "consequence": 9509, - "succeed": 9510, - "avoided": 9511, - "1845": 9512, - "##ulation": 9513, - "inland": 9514, - "##tie": 9515, - "##nna": 9516, - "counsel": 9517, - "profession": 9518, - "chronicle": 9519, - "hurried": 9520, - "##una": 9521, - "eyebrow": 9522, - "eventual": 9523, - "bleeding": 9524, - "innovative": 9525, - "cure": 9526, - "##dom": 9527, - "committees": 9528, - "accounting": 9529, - "con": 9530, - "scope": 9531, - "hardy": 9532, - "heather": 9533, - "tenor": 9534, - "gut": 9535, - "herald": 9536, - "codes": 9537, - "tore": 9538, - "scales": 9539, - "wagon": 9540, - "##oo": 9541, - "luxury": 9542, - "tin": 9543, - "prefer": 9544, - "fountain": 9545, - "triangle": 9546, - "bonds": 9547, - "darling": 9548, - "convoy": 9549, - "dried": 9550, - "traced": 9551, - "beings": 9552, - "troy": 9553, - "accidentally": 9554, - "slam": 9555, - "findings": 9556, - "smelled": 9557, - "joey": 9558, - "lawyers": 9559, - "outcome": 9560, - "steep": 9561, - "bosnia": 9562, - "configuration": 9563, - "shifting": 9564, - "toll": 9565, - "brook": 9566, - "performers": 9567, - "lobby": 9568, - "philosophical": 9569, - "construct": 9570, - "shrine": 9571, - "aggregate": 9572, - "boot": 9573, - "cox": 9574, - "phenomenon": 9575, - "savage": 9576, - "insane": 9577, - "solely": 9578, - "reynolds": 9579, - "lifestyle": 9580, - "##ima": 9581, - "nationally": 9582, - "holdings": 9583, - "consideration": 9584, - "enable": 9585, - "edgar": 
9586, - "mo": 9587, - "mama": 9588, - "##tein": 9589, - "fights": 9590, - "relegation": 9591, - "chances": 9592, - "atomic": 9593, - "hub": 9594, - "conjunction": 9595, - "awkward": 9596, - "reactions": 9597, - "currency": 9598, - "finale": 9599, - "kumar": 9600, - "underwent": 9601, - "steering": 9602, - "elaborate": 9603, - "gifts": 9604, - "comprising": 9605, - "melissa": 9606, - "veins": 9607, - "reasonable": 9608, - "sunshine": 9609, - "chi": 9610, - "solve": 9611, - "trails": 9612, - "inhabited": 9613, - "elimination": 9614, - "ethics": 9615, - "huh": 9616, - "ana": 9617, - "molly": 9618, - "consent": 9619, - "apartments": 9620, - "layout": 9621, - "marines": 9622, - "##ces": 9623, - "hunters": 9624, - "bulk": 9625, - "##oma": 9626, - "hometown": 9627, - "##wall": 9628, - "##mont": 9629, - "cracked": 9630, - "reads": 9631, - "neighbouring": 9632, - "withdrawn": 9633, - "admission": 9634, - "wingspan": 9635, - "damned": 9636, - "anthology": 9637, - "lancashire": 9638, - "brands": 9639, - "batting": 9640, - "forgive": 9641, - "cuban": 9642, - "awful": 9643, - "##lyn": 9644, - "104": 9645, - "dimensions": 9646, - "imagination": 9647, - "##ade": 9648, - "dante": 9649, - "##ship": 9650, - "tracking": 9651, - "desperately": 9652, - "goalkeeper": 9653, - "##yne": 9654, - "groaned": 9655, - "workshops": 9656, - "confident": 9657, - "burton": 9658, - "gerald": 9659, - "milton": 9660, - "circus": 9661, - "uncertain": 9662, - "slope": 9663, - "copenhagen": 9664, - "sophia": 9665, - "fog": 9666, - "philosopher": 9667, - "portraits": 9668, - "accent": 9669, - "cycling": 9670, - "varying": 9671, - "gripped": 9672, - "larvae": 9673, - "garrett": 9674, - "specified": 9675, - "scotia": 9676, - "mature": 9677, - "luther": 9678, - "kurt": 9679, - "rap": 9680, - "##kes": 9681, - "aerial": 9682, - "750": 9683, - "ferdinand": 9684, - "heated": 9685, - "es": 9686, - "transported": 9687, - "##shan": 9688, - "safely": 9689, - "nonetheless": 9690, - "##orn": 9691, - "##gal": 9692, - "motors": 9693, - "demanding": 9694, - "##sburg": 9695, - "startled": 9696, - "##brook": 9697, - "ally": 9698, - "generate": 9699, - "caps": 9700, - "ghana": 9701, - "stained": 9702, - "demo": 9703, - "mentions": 9704, - "beds": 9705, - "ap": 9706, - "afterward": 9707, - "diary": 9708, - "##bling": 9709, - "utility": 9710, - "##iro": 9711, - "richards": 9712, - "1837": 9713, - "conspiracy": 9714, - "conscious": 9715, - "shining": 9716, - "footsteps": 9717, - "observer": 9718, - "cyprus": 9719, - "urged": 9720, - "loyalty": 9721, - "developer": 9722, - "probability": 9723, - "olive": 9724, - "upgraded": 9725, - "gym": 9726, - "miracle": 9727, - "insects": 9728, - "graves": 9729, - "1844": 9730, - "ourselves": 9731, - "hydrogen": 9732, - "amazon": 9733, - "katie": 9734, - "tickets": 9735, - "poets": 9736, - "##pm": 9737, - "planes": 9738, - "##pan": 9739, - "prevention": 9740, - "witnessed": 9741, - "dense": 9742, - "jin": 9743, - "randy": 9744, - "tang": 9745, - "warehouse": 9746, - "monroe": 9747, - "bang": 9748, - "archived": 9749, - "elderly": 9750, - "investigations": 9751, - "alec": 9752, - "granite": 9753, - "mineral": 9754, - "conflicts": 9755, - "controlling": 9756, - "aboriginal": 9757, - "carlo": 9758, - "##zu": 9759, - "mechanics": 9760, - "stan": 9761, - "stark": 9762, - "rhode": 9763, - "skirt": 9764, - "est": 9765, - "##berry": 9766, - "bombs": 9767, - "respected": 9768, - "##horn": 9769, - "imposed": 9770, - "limestone": 9771, - "deny": 9772, - "nominee": 9773, - "memphis": 9774, - "grabbing": 9775, - "disabled": 
9776, - "##als": 9777, - "amusement": 9778, - "aa": 9779, - "frankfurt": 9780, - "corn": 9781, - "referendum": 9782, - "varies": 9783, - "slowed": 9784, - "disk": 9785, - "firms": 9786, - "unconscious": 9787, - "incredible": 9788, - "clue": 9789, - "sue": 9790, - "##zhou": 9791, - "twist": 9792, - "##cio": 9793, - "joins": 9794, - "idaho": 9795, - "chad": 9796, - "developers": 9797, - "computing": 9798, - "destroyer": 9799, - "103": 9800, - "mortal": 9801, - "tucker": 9802, - "kingston": 9803, - "choices": 9804, - "yu": 9805, - "carson": 9806, - "1800": 9807, - "os": 9808, - "whitney": 9809, - "geneva": 9810, - "pretend": 9811, - "dimension": 9812, - "staged": 9813, - "plateau": 9814, - "maya": 9815, - "##une": 9816, - "freestyle": 9817, - "##bc": 9818, - "rovers": 9819, - "hiv": 9820, - "##ids": 9821, - "tristan": 9822, - "classroom": 9823, - "prospect": 9824, - "##hus": 9825, - "honestly": 9826, - "diploma": 9827, - "lied": 9828, - "thermal": 9829, - "auxiliary": 9830, - "feast": 9831, - "unlikely": 9832, - "iata": 9833, - "##tel": 9834, - "morocco": 9835, - "pounding": 9836, - "treasury": 9837, - "lithuania": 9838, - "considerably": 9839, - "1841": 9840, - "dish": 9841, - "1812": 9842, - "geological": 9843, - "matching": 9844, - "stumbled": 9845, - "destroying": 9846, - "marched": 9847, - "brien": 9848, - "advances": 9849, - "cake": 9850, - "nicole": 9851, - "belle": 9852, - "settling": 9853, - "measuring": 9854, - "directing": 9855, - "##mie": 9856, - "tuesday": 9857, - "bassist": 9858, - "capabilities": 9859, - "stunned": 9860, - "fraud": 9861, - "torpedo": 9862, - "##list": 9863, - "##phone": 9864, - "anton": 9865, - "wisdom": 9866, - "surveillance": 9867, - "ruined": 9868, - "##ulate": 9869, - "lawsuit": 9870, - "healthcare": 9871, - "theorem": 9872, - "halls": 9873, - "trend": 9874, - "aka": 9875, - "horizontal": 9876, - "dozens": 9877, - "acquire": 9878, - "lasting": 9879, - "swim": 9880, - "hawk": 9881, - "gorgeous": 9882, - "fees": 9883, - "vicinity": 9884, - "decrease": 9885, - "adoption": 9886, - "tactics": 9887, - "##ography": 9888, - "pakistani": 9889, - "##ole": 9890, - "draws": 9891, - "##hall": 9892, - "willie": 9893, - "burke": 9894, - "heath": 9895, - "algorithm": 9896, - "integral": 9897, - "powder": 9898, - "elliott": 9899, - "brigadier": 9900, - "jackie": 9901, - "tate": 9902, - "varieties": 9903, - "darker": 9904, - "##cho": 9905, - "lately": 9906, - "cigarette": 9907, - "specimens": 9908, - "adds": 9909, - "##ree": 9910, - "##ensis": 9911, - "##inger": 9912, - "exploded": 9913, - "finalist": 9914, - "cia": 9915, - "murders": 9916, - "wilderness": 9917, - "arguments": 9918, - "nicknamed": 9919, - "acceptance": 9920, - "onwards": 9921, - "manufacture": 9922, - "robertson": 9923, - "jets": 9924, - "tampa": 9925, - "enterprises": 9926, - "blog": 9927, - "loudly": 9928, - "composers": 9929, - "nominations": 9930, - "1838": 9931, - "ai": 9932, - "malta": 9933, - "inquiry": 9934, - "automobile": 9935, - "hosting": 9936, - "viii": 9937, - "rays": 9938, - "tilted": 9939, - "grief": 9940, - "museums": 9941, - "strategies": 9942, - "furious": 9943, - "euro": 9944, - "equality": 9945, - "cohen": 9946, - "poison": 9947, - "surrey": 9948, - "wireless": 9949, - "governed": 9950, - "ridiculous": 9951, - "moses": 9952, - "##esh": 9953, - "##room": 9954, - "vanished": 9955, - "##ito": 9956, - "barnes": 9957, - "attract": 9958, - "morrison": 9959, - "istanbul": 9960, - "##iness": 9961, - "absent": 9962, - "rotation": 9963, - "petition": 9964, - "janet": 9965, - "##logical": 9966, - 
"satisfaction": 9967, - "custody": 9968, - "deliberately": 9969, - "observatory": 9970, - "comedian": 9971, - "surfaces": 9972, - "pinyin": 9973, - "novelist": 9974, - "strictly": 9975, - "canterbury": 9976, - "oslo": 9977, - "monks": 9978, - "embrace": 9979, - "ibm": 9980, - "jealous": 9981, - "photograph": 9982, - "continent": 9983, - "dorothy": 9984, - "marina": 9985, - "doc": 9986, - "excess": 9987, - "holden": 9988, - "allegations": 9989, - "explaining": 9990, - "stack": 9991, - "avoiding": 9992, - "lance": 9993, - "storyline": 9994, - "majesty": 9995, - "poorly": 9996, - "spike": 9997, - "dos": 9998, - "bradford": 9999, - "raven": 10000, - "travis": 10001, - "classics": 10002, - "proven": 10003, - "voltage": 10004, - "pillow": 10005, - "fists": 10006, - "butt": 10007, - "1842": 10008, - "interpreted": 10009, - "##car": 10010, - "1839": 10011, - "gage": 10012, - "telegraph": 10013, - "lens": 10014, - "promising": 10015, - "expelled": 10016, - "casual": 10017, - "collector": 10018, - "zones": 10019, - "##min": 10020, - "silly": 10021, - "nintendo": 10022, - "##kh": 10023, - "##bra": 10024, - "downstairs": 10025, - "chef": 10026, - "suspicious": 10027, - "afl": 10028, - "flies": 10029, - "vacant": 10030, - "uganda": 10031, - "pregnancy": 10032, - "condemned": 10033, - "lutheran": 10034, - "estimates": 10035, - "cheap": 10036, - "decree": 10037, - "saxon": 10038, - "proximity": 10039, - "stripped": 10040, - "idiot": 10041, - "deposits": 10042, - "contrary": 10043, - "presenter": 10044, - "magnus": 10045, - "glacier": 10046, - "im": 10047, - "offense": 10048, - "edwin": 10049, - "##ori": 10050, - "upright": 10051, - "##long": 10052, - "bolt": 10053, - "##ois": 10054, - "toss": 10055, - "geographical": 10056, - "##izes": 10057, - "environments": 10058, - "delicate": 10059, - "marking": 10060, - "abstract": 10061, - "xavier": 10062, - "nails": 10063, - "windsor": 10064, - "plantation": 10065, - "occurring": 10066, - "equity": 10067, - "saskatchewan": 10068, - "fears": 10069, - "drifted": 10070, - "sequences": 10071, - "vegetation": 10072, - "revolt": 10073, - "##stic": 10074, - "1843": 10075, - "sooner": 10076, - "fusion": 10077, - "opposing": 10078, - "nato": 10079, - "skating": 10080, - "1836": 10081, - "secretly": 10082, - "ruin": 10083, - "lease": 10084, - "##oc": 10085, - "edit": 10086, - "##nne": 10087, - "flora": 10088, - "anxiety": 10089, - "ruby": 10090, - "##ological": 10091, - "##mia": 10092, - "tel": 10093, - "bout": 10094, - "taxi": 10095, - "emmy": 10096, - "frost": 10097, - "rainbow": 10098, - "compounds": 10099, - "foundations": 10100, - "rainfall": 10101, - "assassination": 10102, - "nightmare": 10103, - "dominican": 10104, - "##win": 10105, - "achievements": 10106, - "deserve": 10107, - "orlando": 10108, - "intact": 10109, - "armenia": 10110, - "##nte": 10111, - "calgary": 10112, - "valentine": 10113, - "106": 10114, - "marion": 10115, - "proclaimed": 10116, - "theodore": 10117, - "bells": 10118, - "courtyard": 10119, - "thigh": 10120, - "gonzalez": 10121, - "console": 10122, - "troop": 10123, - "minimal": 10124, - "monte": 10125, - "everyday": 10126, - "##ence": 10127, - "##if": 10128, - "supporter": 10129, - "terrorism": 10130, - "buck": 10131, - "openly": 10132, - "presbyterian": 10133, - "activists": 10134, - "carpet": 10135, - "##iers": 10136, - "rubbing": 10137, - "uprising": 10138, - "##yi": 10139, - "cute": 10140, - "conceived": 10141, - "legally": 10142, - "##cht": 10143, - "millennium": 10144, - "cello": 10145, - "velocity": 10146, - "ji": 10147, - "rescued": 
10148, - "cardiff": 10149, - "1835": 10150, - "rex": 10151, - "concentrate": 10152, - "senators": 10153, - "beard": 10154, - "rendered": 10155, - "glowing": 10156, - "battalions": 10157, - "scouts": 10158, - "competitors": 10159, - "sculptor": 10160, - "catalogue": 10161, - "arctic": 10162, - "ion": 10163, - "raja": 10164, - "bicycle": 10165, - "wow": 10166, - "glancing": 10167, - "lawn": 10168, - "##woman": 10169, - "gentleman": 10170, - "lighthouse": 10171, - "publish": 10172, - "predicted": 10173, - "calculated": 10174, - "##val": 10175, - "variants": 10176, - "##gne": 10177, - "strain": 10178, - "##ui": 10179, - "winston": 10180, - "deceased": 10181, - "##nus": 10182, - "touchdowns": 10183, - "brady": 10184, - "caleb": 10185, - "sinking": 10186, - "echoed": 10187, - "crush": 10188, - "hon": 10189, - "blessed": 10190, - "protagonist": 10191, - "hayes": 10192, - "endangered": 10193, - "magnitude": 10194, - "editors": 10195, - "##tine": 10196, - "estimate": 10197, - "responsibilities": 10198, - "##mel": 10199, - "backup": 10200, - "laying": 10201, - "consumed": 10202, - "sealed": 10203, - "zurich": 10204, - "lovers": 10205, - "frustrated": 10206, - "##eau": 10207, - "ahmed": 10208, - "kicking": 10209, - "mit": 10210, - "treasurer": 10211, - "1832": 10212, - "biblical": 10213, - "refuse": 10214, - "terrified": 10215, - "pump": 10216, - "agrees": 10217, - "genuine": 10218, - "imprisonment": 10219, - "refuses": 10220, - "plymouth": 10221, - "##hen": 10222, - "lou": 10223, - "##nen": 10224, - "tara": 10225, - "trembling": 10226, - "antarctic": 10227, - "ton": 10228, - "learns": 10229, - "##tas": 10230, - "crap": 10231, - "crucial": 10232, - "faction": 10233, - "atop": 10234, - "##borough": 10235, - "wrap": 10236, - "lancaster": 10237, - "odds": 10238, - "hopkins": 10239, - "erik": 10240, - "lyon": 10241, - "##eon": 10242, - "bros": 10243, - "##ode": 10244, - "snap": 10245, - "locality": 10246, - "tips": 10247, - "empress": 10248, - "crowned": 10249, - "cal": 10250, - "acclaimed": 10251, - "chuckled": 10252, - "##ory": 10253, - "clara": 10254, - "sends": 10255, - "mild": 10256, - "towel": 10257, - "##fl": 10258, - "##day": 10259, - "##а": 10260, - "wishing": 10261, - "assuming": 10262, - "interviewed": 10263, - "##bal": 10264, - "##die": 10265, - "interactions": 10266, - "eden": 10267, - "cups": 10268, - "helena": 10269, - "##lf": 10270, - "indie": 10271, - "beck": 10272, - "##fire": 10273, - "batteries": 10274, - "filipino": 10275, - "wizard": 10276, - "parted": 10277, - "##lam": 10278, - "traces": 10279, - "##born": 10280, - "rows": 10281, - "idol": 10282, - "albany": 10283, - "delegates": 10284, - "##ees": 10285, - "##sar": 10286, - "discussions": 10287, - "##ex": 10288, - "notre": 10289, - "instructed": 10290, - "belgrade": 10291, - "highways": 10292, - "suggestion": 10293, - "lauren": 10294, - "possess": 10295, - "orientation": 10296, - "alexandria": 10297, - "abdul": 10298, - "beats": 10299, - "salary": 10300, - "reunion": 10301, - "ludwig": 10302, - "alright": 10303, - "wagner": 10304, - "intimate": 10305, - "pockets": 10306, - "slovenia": 10307, - "hugged": 10308, - "brighton": 10309, - "merchants": 10310, - "cruel": 10311, - "stole": 10312, - "trek": 10313, - "slopes": 10314, - "repairs": 10315, - "enrollment": 10316, - "politically": 10317, - "underlying": 10318, - "promotional": 10319, - "counting": 10320, - "boeing": 10321, - "##bb": 10322, - "isabella": 10323, - "naming": 10324, - "##и": 10325, - "keen": 10326, - "bacteria": 10327, - "listing": 10328, - "separately": 10329, - 
"belfast": 10330, - "ussr": 10331, - "450": 10332, - "lithuanian": 10333, - "anybody": 10334, - "ribs": 10335, - "sphere": 10336, - "martinez": 10337, - "cock": 10338, - "embarrassed": 10339, - "proposals": 10340, - "fragments": 10341, - "nationals": 10342, - "##fs": 10343, - "##wski": 10344, - "premises": 10345, - "fin": 10346, - "1500": 10347, - "alpine": 10348, - "matched": 10349, - "freely": 10350, - "bounded": 10351, - "jace": 10352, - "sleeve": 10353, - "##af": 10354, - "gaming": 10355, - "pier": 10356, - "populated": 10357, - "evident": 10358, - "##like": 10359, - "frances": 10360, - "flooded": 10361, - "##dle": 10362, - "frightened": 10363, - "pour": 10364, - "trainer": 10365, - "framed": 10366, - "visitor": 10367, - "challenging": 10368, - "pig": 10369, - "wickets": 10370, - "##fold": 10371, - "infected": 10372, - "email": 10373, - "##pes": 10374, - "arose": 10375, - "##aw": 10376, - "reward": 10377, - "ecuador": 10378, - "oblast": 10379, - "vale": 10380, - "ch": 10381, - "shuttle": 10382, - "##usa": 10383, - "bach": 10384, - "rankings": 10385, - "forbidden": 10386, - "cornwall": 10387, - "accordance": 10388, - "salem": 10389, - "consumers": 10390, - "bruno": 10391, - "fantastic": 10392, - "toes": 10393, - "machinery": 10394, - "resolved": 10395, - "julius": 10396, - "remembering": 10397, - "propaganda": 10398, - "iceland": 10399, - "bombardment": 10400, - "tide": 10401, - "contacts": 10402, - "wives": 10403, - "##rah": 10404, - "concerto": 10405, - "macdonald": 10406, - "albania": 10407, - "implement": 10408, - "daisy": 10409, - "tapped": 10410, - "sudan": 10411, - "helmet": 10412, - "angela": 10413, - "mistress": 10414, - "##lic": 10415, - "crop": 10416, - "sunk": 10417, - "finest": 10418, - "##craft": 10419, - "hostile": 10420, - "##ute": 10421, - "##tsu": 10422, - "boxer": 10423, - "fr": 10424, - "paths": 10425, - "adjusted": 10426, - "habit": 10427, - "ballot": 10428, - "supervision": 10429, - "soprano": 10430, - "##zen": 10431, - "bullets": 10432, - "wicked": 10433, - "sunset": 10434, - "regiments": 10435, - "disappear": 10436, - "lamp": 10437, - "performs": 10438, - "app": 10439, - "##gia": 10440, - "##oa": 10441, - "rabbit": 10442, - "digging": 10443, - "incidents": 10444, - "entries": 10445, - "##cion": 10446, - "dishes": 10447, - "##oi": 10448, - "introducing": 10449, - "##ati": 10450, - "##fied": 10451, - "freshman": 10452, - "slot": 10453, - "jill": 10454, - "tackles": 10455, - "baroque": 10456, - "backs": 10457, - "##iest": 10458, - "lone": 10459, - "sponsor": 10460, - "destiny": 10461, - "altogether": 10462, - "convert": 10463, - "##aro": 10464, - "consensus": 10465, - "shapes": 10466, - "demonstration": 10467, - "basically": 10468, - "feminist": 10469, - "auction": 10470, - "artifacts": 10471, - "##bing": 10472, - "strongest": 10473, - "twitter": 10474, - "halifax": 10475, - "2019": 10476, - "allmusic": 10477, - "mighty": 10478, - "smallest": 10479, - "precise": 10480, - "alexandra": 10481, - "viola": 10482, - "##los": 10483, - "##ille": 10484, - "manuscripts": 10485, - "##illo": 10486, - "dancers": 10487, - "ari": 10488, - "managers": 10489, - "monuments": 10490, - "blades": 10491, - "barracks": 10492, - "springfield": 10493, - "maiden": 10494, - "consolidated": 10495, - "electron": 10496, - "##end": 10497, - "berry": 10498, - "airing": 10499, - "wheat": 10500, - "nobel": 10501, - "inclusion": 10502, - "blair": 10503, - "payments": 10504, - "geography": 10505, - "bee": 10506, - "cc": 10507, - "eleanor": 10508, - "react": 10509, - "##hurst": 10510, - "afc": 10511, - 
"manitoba": 10512, - "##yu": 10513, - "su": 10514, - "lineup": 10515, - "fitness": 10516, - "recreational": 10517, - "investments": 10518, - "airborne": 10519, - "disappointment": 10520, - "##dis": 10521, - "edmonton": 10522, - "viewing": 10523, - "##row": 10524, - "renovation": 10525, - "##cast": 10526, - "infant": 10527, - "bankruptcy": 10528, - "roses": 10529, - "aftermath": 10530, - "pavilion": 10531, - "##yer": 10532, - "carpenter": 10533, - "withdrawal": 10534, - "ladder": 10535, - "##hy": 10536, - "discussing": 10537, - "popped": 10538, - "reliable": 10539, - "agreements": 10540, - "rochester": 10541, - "##abad": 10542, - "curves": 10543, - "bombers": 10544, - "220": 10545, - "rao": 10546, - "reverend": 10547, - "decreased": 10548, - "choosing": 10549, - "107": 10550, - "stiff": 10551, - "consulting": 10552, - "naples": 10553, - "crawford": 10554, - "tracy": 10555, - "ka": 10556, - "ribbon": 10557, - "cops": 10558, - "##lee": 10559, - "crushed": 10560, - "deciding": 10561, - "unified": 10562, - "teenager": 10563, - "accepting": 10564, - "flagship": 10565, - "explorer": 10566, - "poles": 10567, - "sanchez": 10568, - "inspection": 10569, - "revived": 10570, - "skilled": 10571, - "induced": 10572, - "exchanged": 10573, - "flee": 10574, - "locals": 10575, - "tragedy": 10576, - "swallow": 10577, - "loading": 10578, - "hanna": 10579, - "demonstrate": 10580, - "##ela": 10581, - "salvador": 10582, - "flown": 10583, - "contestants": 10584, - "civilization": 10585, - "##ines": 10586, - "wanna": 10587, - "rhodes": 10588, - "fletcher": 10589, - "hector": 10590, - "knocking": 10591, - "considers": 10592, - "##ough": 10593, - "nash": 10594, - "mechanisms": 10595, - "sensed": 10596, - "mentally": 10597, - "walt": 10598, - "unclear": 10599, - "##eus": 10600, - "renovated": 10601, - "madame": 10602, - "##cks": 10603, - "crews": 10604, - "governmental": 10605, - "##hin": 10606, - "undertaken": 10607, - "monkey": 10608, - "##ben": 10609, - "##ato": 10610, - "fatal": 10611, - "armored": 10612, - "copa": 10613, - "caves": 10614, - "governance": 10615, - "grasp": 10616, - "perception": 10617, - "certification": 10618, - "froze": 10619, - "damp": 10620, - "tugged": 10621, - "wyoming": 10622, - "##rg": 10623, - "##ero": 10624, - "newman": 10625, - "##lor": 10626, - "nerves": 10627, - "curiosity": 10628, - "graph": 10629, - "115": 10630, - "##ami": 10631, - "withdraw": 10632, - "tunnels": 10633, - "dull": 10634, - "meredith": 10635, - "moss": 10636, - "exhibits": 10637, - "neighbors": 10638, - "communicate": 10639, - "accuracy": 10640, - "explored": 10641, - "raiders": 10642, - "republicans": 10643, - "secular": 10644, - "kat": 10645, - "superman": 10646, - "penny": 10647, - "criticised": 10648, - "##tch": 10649, - "freed": 10650, - "update": 10651, - "conviction": 10652, - "wade": 10653, - "ham": 10654, - "likewise": 10655, - "delegation": 10656, - "gotta": 10657, - "doll": 10658, - "promises": 10659, - "technological": 10660, - "myth": 10661, - "nationality": 10662, - "resolve": 10663, - "convent": 10664, - "##mark": 10665, - "sharon": 10666, - "dig": 10667, - "sip": 10668, - "coordinator": 10669, - "entrepreneur": 10670, - "fold": 10671, - "##dine": 10672, - "capability": 10673, - "councillor": 10674, - "synonym": 10675, - "blown": 10676, - "swan": 10677, - "cursed": 10678, - "1815": 10679, - "jonas": 10680, - "haired": 10681, - "sofa": 10682, - "canvas": 10683, - "keeper": 10684, - "rivalry": 10685, - "##hart": 10686, - "rapper": 10687, - "speedway": 10688, - "swords": 10689, - "postal": 10690, - 
"maxwell": 10691, - "estonia": 10692, - "potter": 10693, - "recurring": 10694, - "##nn": 10695, - "##ave": 10696, - "errors": 10697, - "##oni": 10698, - "cognitive": 10699, - "1834": 10700, - "##²": 10701, - "claws": 10702, - "nadu": 10703, - "roberto": 10704, - "bce": 10705, - "wrestler": 10706, - "ellie": 10707, - "##ations": 10708, - "infinite": 10709, - "ink": 10710, - "##tia": 10711, - "presumably": 10712, - "finite": 10713, - "staircase": 10714, - "108": 10715, - "noel": 10716, - "patricia": 10717, - "nacional": 10718, - "##cation": 10719, - "chill": 10720, - "eternal": 10721, - "tu": 10722, - "preventing": 10723, - "prussia": 10724, - "fossil": 10725, - "limbs": 10726, - "##logist": 10727, - "ernst": 10728, - "frog": 10729, - "perez": 10730, - "rene": 10731, - "##ace": 10732, - "pizza": 10733, - "prussian": 10734, - "##ios": 10735, - "##vy": 10736, - "molecules": 10737, - "regulatory": 10738, - "answering": 10739, - "opinions": 10740, - "sworn": 10741, - "lengths": 10742, - "supposedly": 10743, - "hypothesis": 10744, - "upward": 10745, - "habitats": 10746, - "seating": 10747, - "ancestors": 10748, - "drank": 10749, - "yield": 10750, - "hd": 10751, - "synthesis": 10752, - "researcher": 10753, - "modest": 10754, - "##var": 10755, - "mothers": 10756, - "peered": 10757, - "voluntary": 10758, - "homeland": 10759, - "##the": 10760, - "acclaim": 10761, - "##igan": 10762, - "static": 10763, - "valve": 10764, - "luxembourg": 10765, - "alto": 10766, - "carroll": 10767, - "fe": 10768, - "receptor": 10769, - "norton": 10770, - "ambulance": 10771, - "##tian": 10772, - "johnston": 10773, - "catholics": 10774, - "depicting": 10775, - "jointly": 10776, - "elephant": 10777, - "gloria": 10778, - "mentor": 10779, - "badge": 10780, - "ahmad": 10781, - "distinguish": 10782, - "remarked": 10783, - "councils": 10784, - "precisely": 10785, - "allison": 10786, - "advancing": 10787, - "detection": 10788, - "crowded": 10789, - "##10": 10790, - "cooperative": 10791, - "ankle": 10792, - "mercedes": 10793, - "dagger": 10794, - "surrendered": 10795, - "pollution": 10796, - "commit": 10797, - "subway": 10798, - "jeffrey": 10799, - "lesson": 10800, - "sculptures": 10801, - "provider": 10802, - "##fication": 10803, - "membrane": 10804, - "timothy": 10805, - "rectangular": 10806, - "fiscal": 10807, - "heating": 10808, - "teammate": 10809, - "basket": 10810, - "particle": 10811, - "anonymous": 10812, - "deployment": 10813, - "##ple": 10814, - "missiles": 10815, - "courthouse": 10816, - "proportion": 10817, - "shoe": 10818, - "sec": 10819, - "##ller": 10820, - "complaints": 10821, - "forbes": 10822, - "blacks": 10823, - "abandon": 10824, - "remind": 10825, - "sizes": 10826, - "overwhelming": 10827, - "autobiography": 10828, - "natalie": 10829, - "##awa": 10830, - "risks": 10831, - "contestant": 10832, - "countryside": 10833, - "babies": 10834, - "scorer": 10835, - "invaded": 10836, - "enclosed": 10837, - "proceed": 10838, - "hurling": 10839, - "disorders": 10840, - "##cu": 10841, - "reflecting": 10842, - "continuously": 10843, - "cruiser": 10844, - "graduates": 10845, - "freeway": 10846, - "investigated": 10847, - "ore": 10848, - "deserved": 10849, - "maid": 10850, - "blocking": 10851, - "phillip": 10852, - "jorge": 10853, - "shakes": 10854, - "dove": 10855, - "mann": 10856, - "variables": 10857, - "lacked": 10858, - "burden": 10859, - "accompanying": 10860, - "que": 10861, - "consistently": 10862, - "organizing": 10863, - "provisional": 10864, - "complained": 10865, - "endless": 10866, - "##rm": 10867, - "tubes": 
10868, - "juice": 10869, - "georges": 10870, - "krishna": 10871, - "mick": 10872, - "labels": 10873, - "thriller": 10874, - "##uch": 10875, - "laps": 10876, - "arcade": 10877, - "sage": 10878, - "snail": 10879, - "##table": 10880, - "shannon": 10881, - "fi": 10882, - "laurence": 10883, - "seoul": 10884, - "vacation": 10885, - "presenting": 10886, - "hire": 10887, - "churchill": 10888, - "surprisingly": 10889, - "prohibited": 10890, - "savannah": 10891, - "technically": 10892, - "##oli": 10893, - "170": 10894, - "##lessly": 10895, - "testimony": 10896, - "suited": 10897, - "speeds": 10898, - "toys": 10899, - "romans": 10900, - "mlb": 10901, - "flowering": 10902, - "measurement": 10903, - "talented": 10904, - "kay": 10905, - "settings": 10906, - "charleston": 10907, - "expectations": 10908, - "shattered": 10909, - "achieving": 10910, - "triumph": 10911, - "ceremonies": 10912, - "portsmouth": 10913, - "lanes": 10914, - "mandatory": 10915, - "loser": 10916, - "stretching": 10917, - "cologne": 10918, - "realizes": 10919, - "seventy": 10920, - "cornell": 10921, - "careers": 10922, - "webb": 10923, - "##ulating": 10924, - "americas": 10925, - "budapest": 10926, - "ava": 10927, - "suspicion": 10928, - "##ison": 10929, - "yo": 10930, - "conrad": 10931, - "##hai": 10932, - "sterling": 10933, - "jessie": 10934, - "rector": 10935, - "##az": 10936, - "1831": 10937, - "transform": 10938, - "organize": 10939, - "loans": 10940, - "christine": 10941, - "volcanic": 10942, - "warrant": 10943, - "slender": 10944, - "summers": 10945, - "subfamily": 10946, - "newer": 10947, - "danced": 10948, - "dynamics": 10949, - "rhine": 10950, - "proceeds": 10951, - "heinrich": 10952, - "gastropod": 10953, - "commands": 10954, - "sings": 10955, - "facilitate": 10956, - "easter": 10957, - "ra": 10958, - "positioned": 10959, - "responses": 10960, - "expense": 10961, - "fruits": 10962, - "yanked": 10963, - "imported": 10964, - "25th": 10965, - "velvet": 10966, - "vic": 10967, - "primitive": 10968, - "tribune": 10969, - "baldwin": 10970, - "neighbourhood": 10971, - "donna": 10972, - "rip": 10973, - "hay": 10974, - "pr": 10975, - "##uro": 10976, - "1814": 10977, - "espn": 10978, - "welcomed": 10979, - "##aria": 10980, - "qualifier": 10981, - "glare": 10982, - "highland": 10983, - "timing": 10984, - "##cted": 10985, - "shells": 10986, - "eased": 10987, - "geometry": 10988, - "louder": 10989, - "exciting": 10990, - "slovakia": 10991, - "##sion": 10992, - "##iz": 10993, - "##lot": 10994, - "savings": 10995, - "prairie": 10996, - "##ques": 10997, - "marching": 10998, - "rafael": 10999, - "tonnes": 11000, - "##lled": 11001, - "curtain": 11002, - "preceding": 11003, - "shy": 11004, - "heal": 11005, - "greene": 11006, - "worthy": 11007, - "##pot": 11008, - "detachment": 11009, - "bury": 11010, - "sherman": 11011, - "##eck": 11012, - "reinforced": 11013, - "seeks": 11014, - "bottles": 11015, - "contracted": 11016, - "duchess": 11017, - "outfit": 11018, - "walsh": 11019, - "##sc": 11020, - "mickey": 11021, - "##ase": 11022, - "geoffrey": 11023, - "archer": 11024, - "squeeze": 11025, - "dawson": 11026, - "eliminate": 11027, - "invention": 11028, - "##enberg": 11029, - "neal": 11030, - "##eth": 11031, - "stance": 11032, - "dealer": 11033, - "coral": 11034, - "maple": 11035, - "retire": 11036, - "polo": 11037, - "simplified": 11038, - "##ht": 11039, - "1833": 11040, - "hid": 11041, - "watts": 11042, - "backwards": 11043, - "jules": 11044, - "##oke": 11045, - "genesis": 11046, - "mt": 11047, - "frames": 11048, - "rebounds": 11049, - "burma": 
11050, - "woodland": 11051, - "moist": 11052, - "santos": 11053, - "whispers": 11054, - "drained": 11055, - "subspecies": 11056, - "##aa": 11057, - "streaming": 11058, - "ulster": 11059, - "burnt": 11060, - "correspondence": 11061, - "maternal": 11062, - "gerard": 11063, - "denis": 11064, - "stealing": 11065, - "##load": 11066, - "genius": 11067, - "duchy": 11068, - "##oria": 11069, - "inaugurated": 11070, - "momentum": 11071, - "suits": 11072, - "placement": 11073, - "sovereign": 11074, - "clause": 11075, - "thames": 11076, - "##hara": 11077, - "confederation": 11078, - "reservation": 11079, - "sketch": 11080, - "yankees": 11081, - "lets": 11082, - "rotten": 11083, - "charm": 11084, - "hal": 11085, - "verses": 11086, - "ultra": 11087, - "commercially": 11088, - "dot": 11089, - "salon": 11090, - "citation": 11091, - "adopt": 11092, - "winnipeg": 11093, - "mist": 11094, - "allocated": 11095, - "cairo": 11096, - "##boy": 11097, - "jenkins": 11098, - "interference": 11099, - "objectives": 11100, - "##wind": 11101, - "1820": 11102, - "portfolio": 11103, - "armoured": 11104, - "sectors": 11105, - "##eh": 11106, - "initiatives": 11107, - "##world": 11108, - "integrity": 11109, - "exercises": 11110, - "robe": 11111, - "tap": 11112, - "ab": 11113, - "gazed": 11114, - "##tones": 11115, - "distracted": 11116, - "rulers": 11117, - "111": 11118, - "favorable": 11119, - "jerome": 11120, - "tended": 11121, - "cart": 11122, - "factories": 11123, - "##eri": 11124, - "diplomat": 11125, - "valued": 11126, - "gravel": 11127, - "charitable": 11128, - "##try": 11129, - "calvin": 11130, - "exploring": 11131, - "chang": 11132, - "shepherd": 11133, - "terrace": 11134, - "pdf": 11135, - "pupil": 11136, - "##ural": 11137, - "reflects": 11138, - "ups": 11139, - "##rch": 11140, - "governors": 11141, - "shelf": 11142, - "depths": 11143, - "##nberg": 11144, - "trailed": 11145, - "crest": 11146, - "tackle": 11147, - "##nian": 11148, - "##ats": 11149, - "hatred": 11150, - "##kai": 11151, - "clare": 11152, - "makers": 11153, - "ethiopia": 11154, - "longtime": 11155, - "detected": 11156, - "embedded": 11157, - "lacking": 11158, - "slapped": 11159, - "rely": 11160, - "thomson": 11161, - "anticipation": 11162, - "iso": 11163, - "morton": 11164, - "successive": 11165, - "agnes": 11166, - "screenwriter": 11167, - "straightened": 11168, - "philippe": 11169, - "playwright": 11170, - "haunted": 11171, - "licence": 11172, - "iris": 11173, - "intentions": 11174, - "sutton": 11175, - "112": 11176, - "logical": 11177, - "correctly": 11178, - "##weight": 11179, - "branded": 11180, - "licked": 11181, - "tipped": 11182, - "silva": 11183, - "ricky": 11184, - "narrator": 11185, - "requests": 11186, - "##ents": 11187, - "greeted": 11188, - "supernatural": 11189, - "cow": 11190, - "##wald": 11191, - "lung": 11192, - "refusing": 11193, - "employer": 11194, - "strait": 11195, - "gaelic": 11196, - "liner": 11197, - "##piece": 11198, - "zoe": 11199, - "sabha": 11200, - "##mba": 11201, - "driveway": 11202, - "harvest": 11203, - "prints": 11204, - "bates": 11205, - "reluctantly": 11206, - "threshold": 11207, - "algebra": 11208, - "ira": 11209, - "wherever": 11210, - "coupled": 11211, - "240": 11212, - "assumption": 11213, - "picks": 11214, - "##air": 11215, - "designers": 11216, - "raids": 11217, - "gentlemen": 11218, - "##ean": 11219, - "roller": 11220, - "blowing": 11221, - "leipzig": 11222, - "locks": 11223, - "screw": 11224, - "dressing": 11225, - "strand": 11226, - "##lings": 11227, - "scar": 11228, - "dwarf": 11229, - "depicts": 11230, - 
"##nu": 11231, - "nods": 11232, - "##mine": 11233, - "differ": 11234, - "boris": 11235, - "##eur": 11236, - "yuan": 11237, - "flip": 11238, - "##gie": 11239, - "mob": 11240, - "invested": 11241, - "questioning": 11242, - "applying": 11243, - "##ture": 11244, - "shout": 11245, - "##sel": 11246, - "gameplay": 11247, - "blamed": 11248, - "illustrations": 11249, - "bothered": 11250, - "weakness": 11251, - "rehabilitation": 11252, - "##of": 11253, - "##zes": 11254, - "envelope": 11255, - "rumors": 11256, - "miners": 11257, - "leicester": 11258, - "subtle": 11259, - "kerry": 11260, - "##ico": 11261, - "ferguson": 11262, - "##fu": 11263, - "premiership": 11264, - "ne": 11265, - "##cat": 11266, - "bengali": 11267, - "prof": 11268, - "catches": 11269, - "remnants": 11270, - "dana": 11271, - "##rily": 11272, - "shouting": 11273, - "presidents": 11274, - "baltic": 11275, - "ought": 11276, - "ghosts": 11277, - "dances": 11278, - "sailors": 11279, - "shirley": 11280, - "fancy": 11281, - "dominic": 11282, - "##bie": 11283, - "madonna": 11284, - "##rick": 11285, - "bark": 11286, - "buttons": 11287, - "gymnasium": 11288, - "ashes": 11289, - "liver": 11290, - "toby": 11291, - "oath": 11292, - "providence": 11293, - "doyle": 11294, - "evangelical": 11295, - "nixon": 11296, - "cement": 11297, - "carnegie": 11298, - "embarked": 11299, - "hatch": 11300, - "surroundings": 11301, - "guarantee": 11302, - "needing": 11303, - "pirate": 11304, - "essence": 11305, - "##bee": 11306, - "filter": 11307, - "crane": 11308, - "hammond": 11309, - "projected": 11310, - "immune": 11311, - "percy": 11312, - "twelfth": 11313, - "##ult": 11314, - "regent": 11315, - "doctoral": 11316, - "damon": 11317, - "mikhail": 11318, - "##ichi": 11319, - "lu": 11320, - "critically": 11321, - "elect": 11322, - "realised": 11323, - "abortion": 11324, - "acute": 11325, - "screening": 11326, - "mythology": 11327, - "steadily": 11328, - "##fc": 11329, - "frown": 11330, - "nottingham": 11331, - "kirk": 11332, - "wa": 11333, - "minneapolis": 11334, - "##rra": 11335, - "module": 11336, - "algeria": 11337, - "mc": 11338, - "nautical": 11339, - "encounters": 11340, - "surprising": 11341, - "statues": 11342, - "availability": 11343, - "shirts": 11344, - "pie": 11345, - "alma": 11346, - "brows": 11347, - "munster": 11348, - "mack": 11349, - "soup": 11350, - "crater": 11351, - "tornado": 11352, - "sanskrit": 11353, - "cedar": 11354, - "explosive": 11355, - "bordered": 11356, - "dixon": 11357, - "planets": 11358, - "stamp": 11359, - "exam": 11360, - "happily": 11361, - "##bble": 11362, - "carriers": 11363, - "kidnapped": 11364, - "##vis": 11365, - "accommodation": 11366, - "emigrated": 11367, - "##met": 11368, - "knockout": 11369, - "correspondent": 11370, - "violation": 11371, - "profits": 11372, - "peaks": 11373, - "lang": 11374, - "specimen": 11375, - "agenda": 11376, - "ancestry": 11377, - "pottery": 11378, - "spelling": 11379, - "equations": 11380, - "obtaining": 11381, - "ki": 11382, - "linking": 11383, - "1825": 11384, - "debris": 11385, - "asylum": 11386, - "##20": 11387, - "buddhism": 11388, - "teddy": 11389, - "##ants": 11390, - "gazette": 11391, - "##nger": 11392, - "##sse": 11393, - "dental": 11394, - "eligibility": 11395, - "utc": 11396, - "fathers": 11397, - "averaged": 11398, - "zimbabwe": 11399, - "francesco": 11400, - "coloured": 11401, - "hissed": 11402, - "translator": 11403, - "lynch": 11404, - "mandate": 11405, - "humanities": 11406, - "mackenzie": 11407, - "uniforms": 11408, - "lin": 11409, - "##iana": 11410, - "##gio": 11411, - 
"asset": 11412, - "mhz": 11413, - "fitting": 11414, - "samantha": 11415, - "genera": 11416, - "wei": 11417, - "rim": 11418, - "beloved": 11419, - "shark": 11420, - "riot": 11421, - "entities": 11422, - "expressions": 11423, - "indo": 11424, - "carmen": 11425, - "slipping": 11426, - "owing": 11427, - "abbot": 11428, - "neighbor": 11429, - "sidney": 11430, - "##av": 11431, - "rats": 11432, - "recommendations": 11433, - "encouraging": 11434, - "squadrons": 11435, - "anticipated": 11436, - "commanders": 11437, - "conquered": 11438, - "##oto": 11439, - "donations": 11440, - "diagnosed": 11441, - "##mond": 11442, - "divide": 11443, - "##iva": 11444, - "guessed": 11445, - "decoration": 11446, - "vernon": 11447, - "auditorium": 11448, - "revelation": 11449, - "conversations": 11450, - "##kers": 11451, - "##power": 11452, - "herzegovina": 11453, - "dash": 11454, - "alike": 11455, - "protested": 11456, - "lateral": 11457, - "herman": 11458, - "accredited": 11459, - "mg": 11460, - "##gent": 11461, - "freeman": 11462, - "mel": 11463, - "fiji": 11464, - "crow": 11465, - "crimson": 11466, - "##rine": 11467, - "livestock": 11468, - "##pped": 11469, - "humanitarian": 11470, - "bored": 11471, - "oz": 11472, - "whip": 11473, - "##lene": 11474, - "##ali": 11475, - "legitimate": 11476, - "alter": 11477, - "grinning": 11478, - "spelled": 11479, - "anxious": 11480, - "oriental": 11481, - "wesley": 11482, - "##nin": 11483, - "##hole": 11484, - "carnival": 11485, - "controller": 11486, - "detect": 11487, - "##ssa": 11488, - "bowed": 11489, - "educator": 11490, - "kosovo": 11491, - "macedonia": 11492, - "##sin": 11493, - "occupy": 11494, - "mastering": 11495, - "stephanie": 11496, - "janeiro": 11497, - "para": 11498, - "unaware": 11499, - "nurses": 11500, - "noon": 11501, - "135": 11502, - "cam": 11503, - "hopefully": 11504, - "ranger": 11505, - "combine": 11506, - "sociology": 11507, - "polar": 11508, - "rica": 11509, - "##eer": 11510, - "neill": 11511, - "##sman": 11512, - "holocaust": 11513, - "##ip": 11514, - "doubled": 11515, - "lust": 11516, - "1828": 11517, - "109": 11518, - "decent": 11519, - "cooling": 11520, - "unveiled": 11521, - "##card": 11522, - "1829": 11523, - "nsw": 11524, - "homer": 11525, - "chapman": 11526, - "meyer": 11527, - "##gin": 11528, - "dive": 11529, - "mae": 11530, - "reagan": 11531, - "expertise": 11532, - "##gled": 11533, - "darwin": 11534, - "brooke": 11535, - "sided": 11536, - "prosecution": 11537, - "investigating": 11538, - "comprised": 11539, - "petroleum": 11540, - "genres": 11541, - "reluctant": 11542, - "differently": 11543, - "trilogy": 11544, - "johns": 11545, - "vegetables": 11546, - "corpse": 11547, - "highlighted": 11548, - "lounge": 11549, - "pension": 11550, - "unsuccessfully": 11551, - "elegant": 11552, - "aided": 11553, - "ivory": 11554, - "beatles": 11555, - "amelia": 11556, - "cain": 11557, - "dubai": 11558, - "sunny": 11559, - "immigrant": 11560, - "babe": 11561, - "click": 11562, - "##nder": 11563, - "underwater": 11564, - "pepper": 11565, - "combining": 11566, - "mumbled": 11567, - "atlas": 11568, - "horns": 11569, - "accessed": 11570, - "ballad": 11571, - "physicians": 11572, - "homeless": 11573, - "gestured": 11574, - "rpm": 11575, - "freak": 11576, - "louisville": 11577, - "corporations": 11578, - "patriots": 11579, - "prizes": 11580, - "rational": 11581, - "warn": 11582, - "modes": 11583, - "decorative": 11584, - "overnight": 11585, - "din": 11586, - "troubled": 11587, - "phantom": 11588, - "##ort": 11589, - "monarch": 11590, - "sheer": 11591, - "##dorf": 
11592, - "generals": 11593, - "guidelines": 11594, - "organs": 11595, - "addresses": 11596, - "##zon": 11597, - "enhance": 11598, - "curling": 11599, - "parishes": 11600, - "cord": 11601, - "##kie": 11602, - "linux": 11603, - "caesar": 11604, - "deutsche": 11605, - "bavaria": 11606, - "##bia": 11607, - "coleman": 11608, - "cyclone": 11609, - "##eria": 11610, - "bacon": 11611, - "petty": 11612, - "##yama": 11613, - "##old": 11614, - "hampton": 11615, - "diagnosis": 11616, - "1824": 11617, - "throws": 11618, - "complexity": 11619, - "rita": 11620, - "disputed": 11621, - "##₃": 11622, - "pablo": 11623, - "##sch": 11624, - "marketed": 11625, - "trafficking": 11626, - "##ulus": 11627, - "examine": 11628, - "plague": 11629, - "formats": 11630, - "##oh": 11631, - "vault": 11632, - "faithful": 11633, - "##bourne": 11634, - "webster": 11635, - "##ox": 11636, - "highlights": 11637, - "##ient": 11638, - "##ann": 11639, - "phones": 11640, - "vacuum": 11641, - "sandwich": 11642, - "modeling": 11643, - "##gated": 11644, - "bolivia": 11645, - "clergy": 11646, - "qualities": 11647, - "isabel": 11648, - "##nas": 11649, - "##ars": 11650, - "wears": 11651, - "screams": 11652, - "reunited": 11653, - "annoyed": 11654, - "bra": 11655, - "##ancy": 11656, - "##rate": 11657, - "differential": 11658, - "transmitter": 11659, - "tattoo": 11660, - "container": 11661, - "poker": 11662, - "##och": 11663, - "excessive": 11664, - "resides": 11665, - "cowboys": 11666, - "##tum": 11667, - "augustus": 11668, - "trash": 11669, - "providers": 11670, - "statute": 11671, - "retreated": 11672, - "balcony": 11673, - "reversed": 11674, - "void": 11675, - "storey": 11676, - "preceded": 11677, - "masses": 11678, - "leap": 11679, - "laughs": 11680, - "neighborhoods": 11681, - "wards": 11682, - "schemes": 11683, - "falcon": 11684, - "santo": 11685, - "battlefield": 11686, - "pad": 11687, - "ronnie": 11688, - "thread": 11689, - "lesbian": 11690, - "venus": 11691, - "##dian": 11692, - "beg": 11693, - "sandstone": 11694, - "daylight": 11695, - "punched": 11696, - "gwen": 11697, - "analog": 11698, - "stroked": 11699, - "wwe": 11700, - "acceptable": 11701, - "measurements": 11702, - "dec": 11703, - "toxic": 11704, - "##kel": 11705, - "adequate": 11706, - "surgical": 11707, - "economist": 11708, - "parameters": 11709, - "varsity": 11710, - "##sberg": 11711, - "quantity": 11712, - "ella": 11713, - "##chy": 11714, - "##rton": 11715, - "countess": 11716, - "generating": 11717, - "precision": 11718, - "diamonds": 11719, - "expressway": 11720, - "ga": 11721, - "##ı": 11722, - "1821": 11723, - "uruguay": 11724, - "talents": 11725, - "galleries": 11726, - "expenses": 11727, - "scanned": 11728, - "colleague": 11729, - "outlets": 11730, - "ryder": 11731, - "lucien": 11732, - "##ila": 11733, - "paramount": 11734, - "##bon": 11735, - "syracuse": 11736, - "dim": 11737, - "fangs": 11738, - "gown": 11739, - "sweep": 11740, - "##sie": 11741, - "toyota": 11742, - "missionaries": 11743, - "websites": 11744, - "##nsis": 11745, - "sentences": 11746, - "adviser": 11747, - "val": 11748, - "trademark": 11749, - "spells": 11750, - "##plane": 11751, - "patience": 11752, - "starter": 11753, - "slim": 11754, - "##borg": 11755, - "toe": 11756, - "incredibly": 11757, - "shoots": 11758, - "elliot": 11759, - "nobility": 11760, - "##wyn": 11761, - "cowboy": 11762, - "endorsed": 11763, - "gardner": 11764, - "tendency": 11765, - "persuaded": 11766, - "organisms": 11767, - "emissions": 11768, - "kazakhstan": 11769, - "amused": 11770, - "boring": 11771, - "chips": 11772, - 
"themed": 11773, - "##hand": 11774, - "llc": 11775, - "constantinople": 11776, - "chasing": 11777, - "systematic": 11778, - "guatemala": 11779, - "borrowed": 11780, - "erin": 11781, - "carey": 11782, - "##hard": 11783, - "highlands": 11784, - "struggles": 11785, - "1810": 11786, - "##ifying": 11787, - "##ced": 11788, - "wong": 11789, - "exceptions": 11790, - "develops": 11791, - "enlarged": 11792, - "kindergarten": 11793, - "castro": 11794, - "##ern": 11795, - "##rina": 11796, - "leigh": 11797, - "zombie": 11798, - "juvenile": 11799, - "##most": 11800, - "consul": 11801, - "##nar": 11802, - "sailor": 11803, - "hyde": 11804, - "clarence": 11805, - "intensive": 11806, - "pinned": 11807, - "nasty": 11808, - "useless": 11809, - "jung": 11810, - "clayton": 11811, - "stuffed": 11812, - "exceptional": 11813, - "ix": 11814, - "apostolic": 11815, - "230": 11816, - "transactions": 11817, - "##dge": 11818, - "exempt": 11819, - "swinging": 11820, - "cove": 11821, - "religions": 11822, - "##ash": 11823, - "shields": 11824, - "dairy": 11825, - "bypass": 11826, - "190": 11827, - "pursuing": 11828, - "bug": 11829, - "joyce": 11830, - "bombay": 11831, - "chassis": 11832, - "southampton": 11833, - "chat": 11834, - "interact": 11835, - "redesignated": 11836, - "##pen": 11837, - "nascar": 11838, - "pray": 11839, - "salmon": 11840, - "rigid": 11841, - "regained": 11842, - "malaysian": 11843, - "grim": 11844, - "publicity": 11845, - "constituted": 11846, - "capturing": 11847, - "toilet": 11848, - "delegate": 11849, - "purely": 11850, - "tray": 11851, - "drift": 11852, - "loosely": 11853, - "striker": 11854, - "weakened": 11855, - "trinidad": 11856, - "mitch": 11857, - "itv": 11858, - "defines": 11859, - "transmitted": 11860, - "ming": 11861, - "scarlet": 11862, - "nodding": 11863, - "fitzgerald": 11864, - "fu": 11865, - "narrowly": 11866, - "sp": 11867, - "tooth": 11868, - "standings": 11869, - "virtue": 11870, - "##₁": 11871, - "##wara": 11872, - "##cting": 11873, - "chateau": 11874, - "gloves": 11875, - "lid": 11876, - "##nel": 11877, - "hurting": 11878, - "conservatory": 11879, - "##pel": 11880, - "sinclair": 11881, - "reopened": 11882, - "sympathy": 11883, - "nigerian": 11884, - "strode": 11885, - "advocated": 11886, - "optional": 11887, - "chronic": 11888, - "discharge": 11889, - "##rc": 11890, - "suck": 11891, - "compatible": 11892, - "laurel": 11893, - "stella": 11894, - "shi": 11895, - "fails": 11896, - "wage": 11897, - "dodge": 11898, - "128": 11899, - "informal": 11900, - "sorts": 11901, - "levi": 11902, - "buddha": 11903, - "villagers": 11904, - "##aka": 11905, - "chronicles": 11906, - "heavier": 11907, - "summoned": 11908, - "gateway": 11909, - "3000": 11910, - "eleventh": 11911, - "jewelry": 11912, - "translations": 11913, - "accordingly": 11914, - "seas": 11915, - "##ency": 11916, - "fiber": 11917, - "pyramid": 11918, - "cubic": 11919, - "dragging": 11920, - "##ista": 11921, - "caring": 11922, - "##ops": 11923, - "android": 11924, - "contacted": 11925, - "lunar": 11926, - "##dt": 11927, - "kai": 11928, - "lisbon": 11929, - "patted": 11930, - "1826": 11931, - "sacramento": 11932, - "theft": 11933, - "madagascar": 11934, - "subtropical": 11935, - "disputes": 11936, - "ta": 11937, - "holidays": 11938, - "piper": 11939, - "willow": 11940, - "mare": 11941, - "cane": 11942, - "itunes": 11943, - "newfoundland": 11944, - "benny": 11945, - "companions": 11946, - "dong": 11947, - "raj": 11948, - "observe": 11949, - "roar": 11950, - "charming": 11951, - "plaque": 11952, - "tibetan": 11953, - "fossils": 
11954, - "enacted": 11955, - "manning": 11956, - "bubble": 11957, - "tina": 11958, - "tanzania": 11959, - "##eda": 11960, - "##hir": 11961, - "funk": 11962, - "swamp": 11963, - "deputies": 11964, - "cloak": 11965, - "ufc": 11966, - "scenario": 11967, - "par": 11968, - "scratch": 11969, - "metals": 11970, - "anthem": 11971, - "guru": 11972, - "engaging": 11973, - "specially": 11974, - "##boat": 11975, - "dialects": 11976, - "nineteen": 11977, - "cecil": 11978, - "duet": 11979, - "disability": 11980, - "messenger": 11981, - "unofficial": 11982, - "##lies": 11983, - "defunct": 11984, - "eds": 11985, - "moonlight": 11986, - "drainage": 11987, - "surname": 11988, - "puzzle": 11989, - "honda": 11990, - "switching": 11991, - "conservatives": 11992, - "mammals": 11993, - "knox": 11994, - "broadcaster": 11995, - "sidewalk": 11996, - "cope": 11997, - "##ried": 11998, - "benson": 11999, - "princes": 12000, - "peterson": 12001, - "##sal": 12002, - "bedford": 12003, - "sharks": 12004, - "eli": 12005, - "wreck": 12006, - "alberto": 12007, - "gasp": 12008, - "archaeology": 12009, - "lgbt": 12010, - "teaches": 12011, - "securities": 12012, - "madness": 12013, - "compromise": 12014, - "waving": 12015, - "coordination": 12016, - "davidson": 12017, - "visions": 12018, - "leased": 12019, - "possibilities": 12020, - "eighty": 12021, - "jun": 12022, - "fernandez": 12023, - "enthusiasm": 12024, - "assassin": 12025, - "sponsorship": 12026, - "reviewer": 12027, - "kingdoms": 12028, - "estonian": 12029, - "laboratories": 12030, - "##fy": 12031, - "##nal": 12032, - "applies": 12033, - "verb": 12034, - "celebrations": 12035, - "##zzo": 12036, - "rowing": 12037, - "lightweight": 12038, - "sadness": 12039, - "submit": 12040, - "mvp": 12041, - "balanced": 12042, - "dude": 12043, - "##vas": 12044, - "explicitly": 12045, - "metric": 12046, - "magnificent": 12047, - "mound": 12048, - "brett": 12049, - "mohammad": 12050, - "mistakes": 12051, - "irregular": 12052, - "##hing": 12053, - "##ass": 12054, - "sanders": 12055, - "betrayed": 12056, - "shipped": 12057, - "surge": 12058, - "##enburg": 12059, - "reporters": 12060, - "termed": 12061, - "georg": 12062, - "pity": 12063, - "verbal": 12064, - "bulls": 12065, - "abbreviated": 12066, - "enabling": 12067, - "appealed": 12068, - "##are": 12069, - "##atic": 12070, - "sicily": 12071, - "sting": 12072, - "heel": 12073, - "sweetheart": 12074, - "bart": 12075, - "spacecraft": 12076, - "brutal": 12077, - "monarchy": 12078, - "##tter": 12079, - "aberdeen": 12080, - "cameo": 12081, - "diane": 12082, - "##ub": 12083, - "survivor": 12084, - "clyde": 12085, - "##aries": 12086, - "complaint": 12087, - "##makers": 12088, - "clarinet": 12089, - "delicious": 12090, - "chilean": 12091, - "karnataka": 12092, - "coordinates": 12093, - "1818": 12094, - "panties": 12095, - "##rst": 12096, - "pretending": 12097, - "ar": 12098, - "dramatically": 12099, - "kiev": 12100, - "bella": 12101, - "tends": 12102, - "distances": 12103, - "113": 12104, - "catalog": 12105, - "launching": 12106, - "instances": 12107, - "telecommunications": 12108, - "portable": 12109, - "lindsay": 12110, - "vatican": 12111, - "##eim": 12112, - "angles": 12113, - "aliens": 12114, - "marker": 12115, - "stint": 12116, - "screens": 12117, - "bolton": 12118, - "##rne": 12119, - "judy": 12120, - "wool": 12121, - "benedict": 12122, - "plasma": 12123, - "europa": 12124, - "spark": 12125, - "imaging": 12126, - "filmmaker": 12127, - "swiftly": 12128, - "##een": 12129, - "contributor": 12130, - "##nor": 12131, - "opted": 12132, - "stamps": 
12133, - "apologize": 12134, - "financing": 12135, - "butter": 12136, - "gideon": 12137, - "sophisticated": 12138, - "alignment": 12139, - "avery": 12140, - "chemicals": 12141, - "yearly": 12142, - "speculation": 12143, - "prominence": 12144, - "professionally": 12145, - "##ils": 12146, - "immortal": 12147, - "institutional": 12148, - "inception": 12149, - "wrists": 12150, - "identifying": 12151, - "tribunal": 12152, - "derives": 12153, - "gains": 12154, - "##wo": 12155, - "papal": 12156, - "preference": 12157, - "linguistic": 12158, - "vince": 12159, - "operative": 12160, - "brewery": 12161, - "##ont": 12162, - "unemployment": 12163, - "boyd": 12164, - "##ured": 12165, - "##outs": 12166, - "albeit": 12167, - "prophet": 12168, - "1813": 12169, - "bi": 12170, - "##rr": 12171, - "##face": 12172, - "##rad": 12173, - "quarterly": 12174, - "asteroid": 12175, - "cleaned": 12176, - "radius": 12177, - "temper": 12178, - "##llen": 12179, - "telugu": 12180, - "jerk": 12181, - "viscount": 12182, - "menu": 12183, - "##ote": 12184, - "glimpse": 12185, - "##aya": 12186, - "yacht": 12187, - "hawaiian": 12188, - "baden": 12189, - "##rl": 12190, - "laptop": 12191, - "readily": 12192, - "##gu": 12193, - "monetary": 12194, - "offshore": 12195, - "scots": 12196, - "watches": 12197, - "##yang": 12198, - "##arian": 12199, - "upgrade": 12200, - "needle": 12201, - "xbox": 12202, - "lea": 12203, - "encyclopedia": 12204, - "flank": 12205, - "fingertips": 12206, - "##pus": 12207, - "delight": 12208, - "teachings": 12209, - "confirm": 12210, - "roth": 12211, - "beaches": 12212, - "midway": 12213, - "winters": 12214, - "##iah": 12215, - "teasing": 12216, - "daytime": 12217, - "beverly": 12218, - "gambling": 12219, - "bonnie": 12220, - "##backs": 12221, - "regulated": 12222, - "clement": 12223, - "hermann": 12224, - "tricks": 12225, - "knot": 12226, - "##shing": 12227, - "##uring": 12228, - "##vre": 12229, - "detached": 12230, - "ecological": 12231, - "owed": 12232, - "specialty": 12233, - "byron": 12234, - "inventor": 12235, - "bats": 12236, - "stays": 12237, - "screened": 12238, - "unesco": 12239, - "midland": 12240, - "trim": 12241, - "affection": 12242, - "##ander": 12243, - "##rry": 12244, - "jess": 12245, - "thoroughly": 12246, - "feedback": 12247, - "##uma": 12248, - "chennai": 12249, - "strained": 12250, - "heartbeat": 12251, - "wrapping": 12252, - "overtime": 12253, - "pleaded": 12254, - "##sworth": 12255, - "mon": 12256, - "leisure": 12257, - "oclc": 12258, - "##tate": 12259, - "##ele": 12260, - "feathers": 12261, - "angelo": 12262, - "thirds": 12263, - "nuts": 12264, - "surveys": 12265, - "clever": 12266, - "gill": 12267, - "commentator": 12268, - "##dos": 12269, - "darren": 12270, - "rides": 12271, - "gibraltar": 12272, - "##nc": 12273, - "##mu": 12274, - "dissolution": 12275, - "dedication": 12276, - "shin": 12277, - "meals": 12278, - "saddle": 12279, - "elvis": 12280, - "reds": 12281, - "chaired": 12282, - "taller": 12283, - "appreciation": 12284, - "functioning": 12285, - "niece": 12286, - "favored": 12287, - "advocacy": 12288, - "robbie": 12289, - "criminals": 12290, - "suffolk": 12291, - "yugoslav": 12292, - "passport": 12293, - "constable": 12294, - "congressman": 12295, - "hastings": 12296, - "vera": 12297, - "##rov": 12298, - "consecrated": 12299, - "sparks": 12300, - "ecclesiastical": 12301, - "confined": 12302, - "##ovich": 12303, - "muller": 12304, - "floyd": 12305, - "nora": 12306, - "1822": 12307, - "paved": 12308, - "1827": 12309, - "cumberland": 12310, - "ned": 12311, - "saga": 12312, - 
"spiral": 12313, - "##flow": 12314, - "appreciated": 12315, - "yi": 12316, - "collaborative": 12317, - "treating": 12318, - "similarities": 12319, - "feminine": 12320, - "finishes": 12321, - "##ib": 12322, - "jade": 12323, - "import": 12324, - "##nse": 12325, - "##hot": 12326, - "champagne": 12327, - "mice": 12328, - "securing": 12329, - "celebrities": 12330, - "helsinki": 12331, - "attributes": 12332, - "##gos": 12333, - "cousins": 12334, - "phases": 12335, - "ache": 12336, - "lucia": 12337, - "gandhi": 12338, - "submission": 12339, - "vicar": 12340, - "spear": 12341, - "shine": 12342, - "tasmania": 12343, - "biting": 12344, - "detention": 12345, - "constitute": 12346, - "tighter": 12347, - "seasonal": 12348, - "##gus": 12349, - "terrestrial": 12350, - "matthews": 12351, - "##oka": 12352, - "effectiveness": 12353, - "parody": 12354, - "philharmonic": 12355, - "##onic": 12356, - "1816": 12357, - "strangers": 12358, - "encoded": 12359, - "consortium": 12360, - "guaranteed": 12361, - "regards": 12362, - "shifts": 12363, - "tortured": 12364, - "collision": 12365, - "supervisor": 12366, - "inform": 12367, - "broader": 12368, - "insight": 12369, - "theaters": 12370, - "armour": 12371, - "emeritus": 12372, - "blink": 12373, - "incorporates": 12374, - "mapping": 12375, - "##50": 12376, - "##ein": 12377, - "handball": 12378, - "flexible": 12379, - "##nta": 12380, - "substantially": 12381, - "generous": 12382, - "thief": 12383, - "##own": 12384, - "carr": 12385, - "loses": 12386, - "1793": 12387, - "prose": 12388, - "ucla": 12389, - "romeo": 12390, - "generic": 12391, - "metallic": 12392, - "realization": 12393, - "damages": 12394, - "mk": 12395, - "commissioners": 12396, - "zach": 12397, - "default": 12398, - "##ther": 12399, - "helicopters": 12400, - "lengthy": 12401, - "stems": 12402, - "spa": 12403, - "partnered": 12404, - "spectators": 12405, - "rogue": 12406, - "indication": 12407, - "penalties": 12408, - "teresa": 12409, - "1801": 12410, - "sen": 12411, - "##tric": 12412, - "dalton": 12413, - "##wich": 12414, - "irving": 12415, - "photographic": 12416, - "##vey": 12417, - "dell": 12418, - "deaf": 12419, - "peters": 12420, - "excluded": 12421, - "unsure": 12422, - "##vable": 12423, - "patterson": 12424, - "crawled": 12425, - "##zio": 12426, - "resided": 12427, - "whipped": 12428, - "latvia": 12429, - "slower": 12430, - "ecole": 12431, - "pipes": 12432, - "employers": 12433, - "maharashtra": 12434, - "comparable": 12435, - "va": 12436, - "textile": 12437, - "pageant": 12438, - "##gel": 12439, - "alphabet": 12440, - "binary": 12441, - "irrigation": 12442, - "chartered": 12443, - "choked": 12444, - "antoine": 12445, - "offs": 12446, - "waking": 12447, - "supplement": 12448, - "##wen": 12449, - "quantities": 12450, - "demolition": 12451, - "regain": 12452, - "locate": 12453, - "urdu": 12454, - "folks": 12455, - "alt": 12456, - "114": 12457, - "##mc": 12458, - "scary": 12459, - "andreas": 12460, - "whites": 12461, - "##ava": 12462, - "classrooms": 12463, - "mw": 12464, - "aesthetic": 12465, - "publishes": 12466, - "valleys": 12467, - "guides": 12468, - "cubs": 12469, - "johannes": 12470, - "bryant": 12471, - "conventions": 12472, - "affecting": 12473, - "##itt": 12474, - "drain": 12475, - "awesome": 12476, - "isolation": 12477, - "prosecutor": 12478, - "ambitious": 12479, - "apology": 12480, - "captive": 12481, - "downs": 12482, - "atmospheric": 12483, - "lorenzo": 12484, - "aisle": 12485, - "beef": 12486, - "foul": 12487, - "##onia": 12488, - "kidding": 12489, - "composite": 12490, - "disturbed": 
12491, - "illusion": 12492, - "natives": 12493, - "##ffer": 12494, - "emi": 12495, - "rockets": 12496, - "riverside": 12497, - "wartime": 12498, - "painters": 12499, - "adolf": 12500, - "melted": 12501, - "##ail": 12502, - "uncertainty": 12503, - "simulation": 12504, - "hawks": 12505, - "progressed": 12506, - "meantime": 12507, - "builder": 12508, - "spray": 12509, - "breach": 12510, - "unhappy": 12511, - "regina": 12512, - "russians": 12513, - "##urg": 12514, - "determining": 12515, - "##tation": 12516, - "tram": 12517, - "1806": 12518, - "##quin": 12519, - "aging": 12520, - "##12": 12521, - "1823": 12522, - "garion": 12523, - "rented": 12524, - "mister": 12525, - "diaz": 12526, - "terminated": 12527, - "clip": 12528, - "1817": 12529, - "depend": 12530, - "nervously": 12531, - "disco": 12532, - "owe": 12533, - "defenders": 12534, - "shiva": 12535, - "notorious": 12536, - "disbelief": 12537, - "shiny": 12538, - "worcester": 12539, - "##gation": 12540, - "##yr": 12541, - "trailing": 12542, - "undertook": 12543, - "islander": 12544, - "belarus": 12545, - "limitations": 12546, - "watershed": 12547, - "fuller": 12548, - "overlooking": 12549, - "utilized": 12550, - "raphael": 12551, - "1819": 12552, - "synthetic": 12553, - "breakdown": 12554, - "klein": 12555, - "##nate": 12556, - "moaned": 12557, - "memoir": 12558, - "lamb": 12559, - "practicing": 12560, - "##erly": 12561, - "cellular": 12562, - "arrows": 12563, - "exotic": 12564, - "##graphy": 12565, - "witches": 12566, - "117": 12567, - "charted": 12568, - "rey": 12569, - "hut": 12570, - "hierarchy": 12571, - "subdivision": 12572, - "freshwater": 12573, - "giuseppe": 12574, - "aloud": 12575, - "reyes": 12576, - "qatar": 12577, - "marty": 12578, - "sideways": 12579, - "utterly": 12580, - "sexually": 12581, - "jude": 12582, - "prayers": 12583, - "mccarthy": 12584, - "softball": 12585, - "blend": 12586, - "damien": 12587, - "##gging": 12588, - "##metric": 12589, - "wholly": 12590, - "erupted": 12591, - "lebanese": 12592, - "negro": 12593, - "revenues": 12594, - "tasted": 12595, - "comparative": 12596, - "teamed": 12597, - "transaction": 12598, - "labeled": 12599, - "maori": 12600, - "sovereignty": 12601, - "parkway": 12602, - "trauma": 12603, - "gran": 12604, - "malay": 12605, - "121": 12606, - "advancement": 12607, - "descendant": 12608, - "2020": 12609, - "buzz": 12610, - "salvation": 12611, - "inventory": 12612, - "symbolic": 12613, - "##making": 12614, - "antarctica": 12615, - "mps": 12616, - "##gas": 12617, - "##bro": 12618, - "mohammed": 12619, - "myanmar": 12620, - "holt": 12621, - "submarines": 12622, - "tones": 12623, - "##lman": 12624, - "locker": 12625, - "patriarch": 12626, - "bangkok": 12627, - "emerson": 12628, - "remarks": 12629, - "predators": 12630, - "kin": 12631, - "afghan": 12632, - "confession": 12633, - "norwich": 12634, - "rental": 12635, - "emerge": 12636, - "advantages": 12637, - "##zel": 12638, - "rca": 12639, - "##hold": 12640, - "shortened": 12641, - "storms": 12642, - "aidan": 12643, - "##matic": 12644, - "autonomy": 12645, - "compliance": 12646, - "##quet": 12647, - "dudley": 12648, - "atp": 12649, - "##osis": 12650, - "1803": 12651, - "motto": 12652, - "documentation": 12653, - "summary": 12654, - "professors": 12655, - "spectacular": 12656, - "christina": 12657, - "archdiocese": 12658, - "flashing": 12659, - "innocence": 12660, - "remake": 12661, - "##dell": 12662, - "psychic": 12663, - "reef": 12664, - "scare": 12665, - "employ": 12666, - "rs": 12667, - "sticks": 12668, - "meg": 12669, - "gus": 12670, - "leans": 
12671, - "##ude": 12672, - "accompany": 12673, - "bergen": 12674, - "tomas": 12675, - "##iko": 12676, - "doom": 12677, - "wages": 12678, - "pools": 12679, - "##nch": 12680, - "##bes": 12681, - "breasts": 12682, - "scholarly": 12683, - "alison": 12684, - "outline": 12685, - "brittany": 12686, - "breakthrough": 12687, - "willis": 12688, - "realistic": 12689, - "##cut": 12690, - "##boro": 12691, - "competitor": 12692, - "##stan": 12693, - "pike": 12694, - "picnic": 12695, - "icon": 12696, - "designing": 12697, - "commercials": 12698, - "washing": 12699, - "villain": 12700, - "skiing": 12701, - "micro": 12702, - "costumes": 12703, - "auburn": 12704, - "halted": 12705, - "executives": 12706, - "##hat": 12707, - "logistics": 12708, - "cycles": 12709, - "vowel": 12710, - "applicable": 12711, - "barrett": 12712, - "exclaimed": 12713, - "eurovision": 12714, - "eternity": 12715, - "ramon": 12716, - "##umi": 12717, - "##lls": 12718, - "modifications": 12719, - "sweeping": 12720, - "disgust": 12721, - "##uck": 12722, - "torch": 12723, - "aviv": 12724, - "ensuring": 12725, - "rude": 12726, - "dusty": 12727, - "sonic": 12728, - "donovan": 12729, - "outskirts": 12730, - "cu": 12731, - "pathway": 12732, - "##band": 12733, - "##gun": 12734, - "##lines": 12735, - "disciplines": 12736, - "acids": 12737, - "cadet": 12738, - "paired": 12739, - "##40": 12740, - "sketches": 12741, - "##sive": 12742, - "marriages": 12743, - "##⁺": 12744, - "folding": 12745, - "peers": 12746, - "slovak": 12747, - "implies": 12748, - "admired": 12749, - "##beck": 12750, - "1880s": 12751, - "leopold": 12752, - "instinct": 12753, - "attained": 12754, - "weston": 12755, - "megan": 12756, - "horace": 12757, - "##ination": 12758, - "dorsal": 12759, - "ingredients": 12760, - "evolutionary": 12761, - "##its": 12762, - "complications": 12763, - "deity": 12764, - "lethal": 12765, - "brushing": 12766, - "levy": 12767, - "deserted": 12768, - "institutes": 12769, - "posthumously": 12770, - "delivering": 12771, - "telescope": 12772, - "coronation": 12773, - "motivated": 12774, - "rapids": 12775, - "luc": 12776, - "flicked": 12777, - "pays": 12778, - "volcano": 12779, - "tanner": 12780, - "weighed": 12781, - "##nica": 12782, - "crowds": 12783, - "frankie": 12784, - "gifted": 12785, - "addressing": 12786, - "granddaughter": 12787, - "winding": 12788, - "##rna": 12789, - "constantine": 12790, - "gomez": 12791, - "##front": 12792, - "landscapes": 12793, - "rudolf": 12794, - "anthropology": 12795, - "slate": 12796, - "werewolf": 12797, - "##lio": 12798, - "astronomy": 12799, - "circa": 12800, - "rouge": 12801, - "dreaming": 12802, - "sack": 12803, - "knelt": 12804, - "drowned": 12805, - "naomi": 12806, - "prolific": 12807, - "tracked": 12808, - "freezing": 12809, - "herb": 12810, - "##dium": 12811, - "agony": 12812, - "randall": 12813, - "twisting": 12814, - "wendy": 12815, - "deposit": 12816, - "touches": 12817, - "vein": 12818, - "wheeler": 12819, - "##bbled": 12820, - "##bor": 12821, - "batted": 12822, - "retaining": 12823, - "tire": 12824, - "presently": 12825, - "compare": 12826, - "specification": 12827, - "daemon": 12828, - "nigel": 12829, - "##grave": 12830, - "merry": 12831, - "recommendation": 12832, - "czechoslovakia": 12833, - "sandra": 12834, - "ng": 12835, - "roma": 12836, - "##sts": 12837, - "lambert": 12838, - "inheritance": 12839, - "sheikh": 12840, - "winchester": 12841, - "cries": 12842, - "examining": 12843, - "##yle": 12844, - "comeback": 12845, - "cuisine": 12846, - "nave": 12847, - "##iv": 12848, - "ko": 12849, - "retrieve": 
12850, - "tomatoes": 12851, - "barker": 12852, - "polished": 12853, - "defining": 12854, - "irene": 12855, - "lantern": 12856, - "personalities": 12857, - "begging": 12858, - "tract": 12859, - "swore": 12860, - "1809": 12861, - "175": 12862, - "##gic": 12863, - "omaha": 12864, - "brotherhood": 12865, - "##rley": 12866, - "haiti": 12867, - "##ots": 12868, - "exeter": 12869, - "##ete": 12870, - "##zia": 12871, - "steele": 12872, - "dumb": 12873, - "pearson": 12874, - "210": 12875, - "surveyed": 12876, - "elisabeth": 12877, - "trends": 12878, - "##ef": 12879, - "fritz": 12880, - "##rf": 12881, - "premium": 12882, - "bugs": 12883, - "fraction": 12884, - "calmly": 12885, - "viking": 12886, - "##birds": 12887, - "tug": 12888, - "inserted": 12889, - "unusually": 12890, - "##ield": 12891, - "confronted": 12892, - "distress": 12893, - "crashing": 12894, - "brent": 12895, - "turks": 12896, - "resign": 12897, - "##olo": 12898, - "cambodia": 12899, - "gabe": 12900, - "sauce": 12901, - "##kal": 12902, - "evelyn": 12903, - "116": 12904, - "extant": 12905, - "clusters": 12906, - "quarry": 12907, - "teenagers": 12908, - "luna": 12909, - "##lers": 12910, - "##ister": 12911, - "affiliation": 12912, - "drill": 12913, - "##ashi": 12914, - "panthers": 12915, - "scenic": 12916, - "libya": 12917, - "anita": 12918, - "strengthen": 12919, - "inscriptions": 12920, - "##cated": 12921, - "lace": 12922, - "sued": 12923, - "judith": 12924, - "riots": 12925, - "##uted": 12926, - "mint": 12927, - "##eta": 12928, - "preparations": 12929, - "midst": 12930, - "dub": 12931, - "challenger": 12932, - "##vich": 12933, - "mock": 12934, - "cf": 12935, - "displaced": 12936, - "wicket": 12937, - "breaths": 12938, - "enables": 12939, - "schmidt": 12940, - "analyst": 12941, - "##lum": 12942, - "ag": 12943, - "highlight": 12944, - "automotive": 12945, - "axe": 12946, - "josef": 12947, - "newark": 12948, - "sufficiently": 12949, - "resembles": 12950, - "50th": 12951, - "##pal": 12952, - "flushed": 12953, - "mum": 12954, - "traits": 12955, - "##ante": 12956, - "commodore": 12957, - "incomplete": 12958, - "warming": 12959, - "titular": 12960, - "ceremonial": 12961, - "ethical": 12962, - "118": 12963, - "celebrating": 12964, - "eighteenth": 12965, - "cao": 12966, - "lima": 12967, - "medalist": 12968, - "mobility": 12969, - "strips": 12970, - "snakes": 12971, - "##city": 12972, - "miniature": 12973, - "zagreb": 12974, - "barton": 12975, - "escapes": 12976, - "umbrella": 12977, - "automated": 12978, - "doubted": 12979, - "differs": 12980, - "cooled": 12981, - "georgetown": 12982, - "dresden": 12983, - "cooked": 12984, - "fade": 12985, - "wyatt": 12986, - "rna": 12987, - "jacobs": 12988, - "carlton": 12989, - "abundant": 12990, - "stereo": 12991, - "boost": 12992, - "madras": 12993, - "inning": 12994, - "##hia": 12995, - "spur": 12996, - "ip": 12997, - "malayalam": 12998, - "begged": 12999, - "osaka": 13000, - "groan": 13001, - "escaping": 13002, - "charging": 13003, - "dose": 13004, - "vista": 13005, - "##aj": 13006, - "bud": 13007, - "papa": 13008, - "communists": 13009, - "advocates": 13010, - "edged": 13011, - "tri": 13012, - "##cent": 13013, - "resemble": 13014, - "peaking": 13015, - "necklace": 13016, - "fried": 13017, - "montenegro": 13018, - "saxony": 13019, - "goose": 13020, - "glances": 13021, - "stuttgart": 13022, - "curator": 13023, - "recruit": 13024, - "grocery": 13025, - "sympathetic": 13026, - "##tting": 13027, - "##fort": 13028, - "127": 13029, - "lotus": 13030, - "randolph": 13031, - "ancestor": 13032, - "##rand": 13033, - 
"succeeding": 13034, - "jupiter": 13035, - "1798": 13036, - "macedonian": 13037, - "##heads": 13038, - "hiking": 13039, - "1808": 13040, - "handing": 13041, - "fischer": 13042, - "##itive": 13043, - "garbage": 13044, - "node": 13045, - "##pies": 13046, - "prone": 13047, - "singular": 13048, - "papua": 13049, - "inclined": 13050, - "attractions": 13051, - "italia": 13052, - "pouring": 13053, - "motioned": 13054, - "grandma": 13055, - "garnered": 13056, - "jacksonville": 13057, - "corp": 13058, - "ego": 13059, - "ringing": 13060, - "aluminum": 13061, - "##hausen": 13062, - "ordering": 13063, - "##foot": 13064, - "drawer": 13065, - "traders": 13066, - "synagogue": 13067, - "##play": 13068, - "##kawa": 13069, - "resistant": 13070, - "wandering": 13071, - "fragile": 13072, - "fiona": 13073, - "teased": 13074, - "var": 13075, - "hardcore": 13076, - "soaked": 13077, - "jubilee": 13078, - "decisive": 13079, - "exposition": 13080, - "mercer": 13081, - "poster": 13082, - "valencia": 13083, - "hale": 13084, - "kuwait": 13085, - "1811": 13086, - "##ises": 13087, - "##wr": 13088, - "##eed": 13089, - "tavern": 13090, - "gamma": 13091, - "122": 13092, - "johan": 13093, - "##uer": 13094, - "airways": 13095, - "amino": 13096, - "gil": 13097, - "##ury": 13098, - "vocational": 13099, - "domains": 13100, - "torres": 13101, - "##sp": 13102, - "generator": 13103, - "folklore": 13104, - "outcomes": 13105, - "##keeper": 13106, - "canberra": 13107, - "shooter": 13108, - "fl": 13109, - "beams": 13110, - "confrontation": 13111, - "##lling": 13112, - "##gram": 13113, - "feb": 13114, - "aligned": 13115, - "forestry": 13116, - "pipeline": 13117, - "jax": 13118, - "motorway": 13119, - "conception": 13120, - "decay": 13121, - "##tos": 13122, - "coffin": 13123, - "##cott": 13124, - "stalin": 13125, - "1805": 13126, - "escorted": 13127, - "minded": 13128, - "##nam": 13129, - "sitcom": 13130, - "purchasing": 13131, - "twilight": 13132, - "veronica": 13133, - "additions": 13134, - "passive": 13135, - "tensions": 13136, - "straw": 13137, - "123": 13138, - "frequencies": 13139, - "1804": 13140, - "refugee": 13141, - "cultivation": 13142, - "##iate": 13143, - "christie": 13144, - "clary": 13145, - "bulletin": 13146, - "crept": 13147, - "disposal": 13148, - "##rich": 13149, - "##zong": 13150, - "processor": 13151, - "crescent": 13152, - "##rol": 13153, - "bmw": 13154, - "emphasized": 13155, - "whale": 13156, - "nazis": 13157, - "aurora": 13158, - "##eng": 13159, - "dwelling": 13160, - "hauled": 13161, - "sponsors": 13162, - "toledo": 13163, - "mega": 13164, - "ideology": 13165, - "theatres": 13166, - "tessa": 13167, - "cerambycidae": 13168, - "saves": 13169, - "turtle": 13170, - "cone": 13171, - "suspects": 13172, - "kara": 13173, - "rusty": 13174, - "yelling": 13175, - "greeks": 13176, - "mozart": 13177, - "shades": 13178, - "cocked": 13179, - "participant": 13180, - "##tro": 13181, - "shire": 13182, - "spit": 13183, - "freeze": 13184, - "necessity": 13185, - "##cos": 13186, - "inmates": 13187, - "nielsen": 13188, - "councillors": 13189, - "loaned": 13190, - "uncommon": 13191, - "omar": 13192, - "peasants": 13193, - "botanical": 13194, - "offspring": 13195, - "daniels": 13196, - "formations": 13197, - "jokes": 13198, - "1794": 13199, - "pioneers": 13200, - "sigma": 13201, - "licensing": 13202, - "##sus": 13203, - "wheelchair": 13204, - "polite": 13205, - "1807": 13206, - "liquor": 13207, - "pratt": 13208, - "trustee": 13209, - "##uta": 13210, - "forewings": 13211, - "balloon": 13212, - "##zz": 13213, - "kilometre": 13214, - 
"camping": 13215, - "explicit": 13216, - "casually": 13217, - "shawn": 13218, - "foolish": 13219, - "teammates": 13220, - "nm": 13221, - "hassan": 13222, - "carrie": 13223, - "judged": 13224, - "satisfy": 13225, - "vanessa": 13226, - "knives": 13227, - "selective": 13228, - "cnn": 13229, - "flowed": 13230, - "##lice": 13231, - "eclipse": 13232, - "stressed": 13233, - "eliza": 13234, - "mathematician": 13235, - "cease": 13236, - "cultivated": 13237, - "##roy": 13238, - "commissions": 13239, - "browns": 13240, - "##ania": 13241, - "destroyers": 13242, - "sheridan": 13243, - "meadow": 13244, - "##rius": 13245, - "minerals": 13246, - "##cial": 13247, - "downstream": 13248, - "clash": 13249, - "gram": 13250, - "memoirs": 13251, - "ventures": 13252, - "baha": 13253, - "seymour": 13254, - "archie": 13255, - "midlands": 13256, - "edith": 13257, - "fare": 13258, - "flynn": 13259, - "invite": 13260, - "canceled": 13261, - "tiles": 13262, - "stabbed": 13263, - "boulder": 13264, - "incorporate": 13265, - "amended": 13266, - "camden": 13267, - "facial": 13268, - "mollusk": 13269, - "unreleased": 13270, - "descriptions": 13271, - "yoga": 13272, - "grabs": 13273, - "550": 13274, - "raises": 13275, - "ramp": 13276, - "shiver": 13277, - "##rose": 13278, - "coined": 13279, - "pioneering": 13280, - "tunes": 13281, - "qing": 13282, - "warwick": 13283, - "tops": 13284, - "119": 13285, - "melanie": 13286, - "giles": 13287, - "##rous": 13288, - "wandered": 13289, - "##inal": 13290, - "annexed": 13291, - "nov": 13292, - "30th": 13293, - "unnamed": 13294, - "##ished": 13295, - "organizational": 13296, - "airplane": 13297, - "normandy": 13298, - "stoke": 13299, - "whistle": 13300, - "blessing": 13301, - "violations": 13302, - "chased": 13303, - "holders": 13304, - "shotgun": 13305, - "##ctic": 13306, - "outlet": 13307, - "reactor": 13308, - "##vik": 13309, - "tires": 13310, - "tearing": 13311, - "shores": 13312, - "fortified": 13313, - "mascot": 13314, - "constituencies": 13315, - "nc": 13316, - "columnist": 13317, - "productive": 13318, - "tibet": 13319, - "##rta": 13320, - "lineage": 13321, - "hooked": 13322, - "oct": 13323, - "tapes": 13324, - "judging": 13325, - "cody": 13326, - "##gger": 13327, - "hansen": 13328, - "kashmir": 13329, - "triggered": 13330, - "##eva": 13331, - "solved": 13332, - "cliffs": 13333, - "##tree": 13334, - "resisted": 13335, - "anatomy": 13336, - "protesters": 13337, - "transparent": 13338, - "implied": 13339, - "##iga": 13340, - "injection": 13341, - "mattress": 13342, - "excluding": 13343, - "##mbo": 13344, - "defenses": 13345, - "helpless": 13346, - "devotion": 13347, - "##elli": 13348, - "growl": 13349, - "liberals": 13350, - "weber": 13351, - "phenomena": 13352, - "atoms": 13353, - "plug": 13354, - "##iff": 13355, - "mortality": 13356, - "apprentice": 13357, - "howe": 13358, - "convincing": 13359, - "aaa": 13360, - "swimmer": 13361, - "barber": 13362, - "leone": 13363, - "promptly": 13364, - "sodium": 13365, - "def": 13366, - "nowadays": 13367, - "arise": 13368, - "##oning": 13369, - "gloucester": 13370, - "corrected": 13371, - "dignity": 13372, - "norm": 13373, - "erie": 13374, - "##ders": 13375, - "elders": 13376, - "evacuated": 13377, - "sylvia": 13378, - "compression": 13379, - "##yar": 13380, - "hartford": 13381, - "pose": 13382, - "backpack": 13383, - "reasoning": 13384, - "accepts": 13385, - "24th": 13386, - "wipe": 13387, - "millimetres": 13388, - "marcel": 13389, - "##oda": 13390, - "dodgers": 13391, - "albion": 13392, - "1790": 13393, - "overwhelmed": 13394, - 
"aerospace": 13395, - "oaks": 13396, - "1795": 13397, - "showcase": 13398, - "acknowledge": 13399, - "recovering": 13400, - "nolan": 13401, - "ashe": 13402, - "hurts": 13403, - "geology": 13404, - "fashioned": 13405, - "disappearance": 13406, - "farewell": 13407, - "swollen": 13408, - "shrug": 13409, - "marquis": 13410, - "wimbledon": 13411, - "124": 13412, - "rue": 13413, - "1792": 13414, - "commemorate": 13415, - "reduces": 13416, - "experiencing": 13417, - "inevitable": 13418, - "calcutta": 13419, - "intel": 13420, - "##court": 13421, - "murderer": 13422, - "sticking": 13423, - "fisheries": 13424, - "imagery": 13425, - "bloom": 13426, - "280": 13427, - "brake": 13428, - "##inus": 13429, - "gustav": 13430, - "hesitation": 13431, - "memorable": 13432, - "po": 13433, - "viral": 13434, - "beans": 13435, - "accidents": 13436, - "tunisia": 13437, - "antenna": 13438, - "spilled": 13439, - "consort": 13440, - "treatments": 13441, - "aye": 13442, - "perimeter": 13443, - "##gard": 13444, - "donation": 13445, - "hostage": 13446, - "migrated": 13447, - "banker": 13448, - "addiction": 13449, - "apex": 13450, - "lil": 13451, - "trout": 13452, - "##ously": 13453, - "conscience": 13454, - "##nova": 13455, - "rams": 13456, - "sands": 13457, - "genome": 13458, - "passionate": 13459, - "troubles": 13460, - "##lets": 13461, - "##set": 13462, - "amid": 13463, - "##ibility": 13464, - "##ret": 13465, - "higgins": 13466, - "exceed": 13467, - "vikings": 13468, - "##vie": 13469, - "payne": 13470, - "##zan": 13471, - "muscular": 13472, - "##ste": 13473, - "defendant": 13474, - "sucking": 13475, - "##wal": 13476, - "ibrahim": 13477, - "fuselage": 13478, - "claudia": 13479, - "vfl": 13480, - "europeans": 13481, - "snails": 13482, - "interval": 13483, - "##garh": 13484, - "preparatory": 13485, - "statewide": 13486, - "tasked": 13487, - "lacrosse": 13488, - "viktor": 13489, - "##lation": 13490, - "angola": 13491, - "##hra": 13492, - "flint": 13493, - "implications": 13494, - "employs": 13495, - "teens": 13496, - "patrons": 13497, - "stall": 13498, - "weekends": 13499, - "barriers": 13500, - "scrambled": 13501, - "nucleus": 13502, - "tehran": 13503, - "jenna": 13504, - "parsons": 13505, - "lifelong": 13506, - "robots": 13507, - "displacement": 13508, - "5000": 13509, - "##bles": 13510, - "precipitation": 13511, - "##gt": 13512, - "knuckles": 13513, - "clutched": 13514, - "1802": 13515, - "marrying": 13516, - "ecology": 13517, - "marx": 13518, - "accusations": 13519, - "declare": 13520, - "scars": 13521, - "kolkata": 13522, - "mat": 13523, - "meadows": 13524, - "bermuda": 13525, - "skeleton": 13526, - "finalists": 13527, - "vintage": 13528, - "crawl": 13529, - "coordinate": 13530, - "affects": 13531, - "subjected": 13532, - "orchestral": 13533, - "mistaken": 13534, - "##tc": 13535, - "mirrors": 13536, - "dipped": 13537, - "relied": 13538, - "260": 13539, - "arches": 13540, - "candle": 13541, - "##nick": 13542, - "incorporating": 13543, - "wildly": 13544, - "fond": 13545, - "basilica": 13546, - "owl": 13547, - "fringe": 13548, - "rituals": 13549, - "whispering": 13550, - "stirred": 13551, - "feud": 13552, - "tertiary": 13553, - "slick": 13554, - "goat": 13555, - "honorable": 13556, - "whereby": 13557, - "skip": 13558, - "ricardo": 13559, - "stripes": 13560, - "parachute": 13561, - "adjoining": 13562, - "submerged": 13563, - "synthesizer": 13564, - "##gren": 13565, - "intend": 13566, - "positively": 13567, - "ninety": 13568, - "phi": 13569, - "beaver": 13570, - "partition": 13571, - "fellows": 13572, - "alexis": 13573, - 
"prohibition": 13574, - "carlisle": 13575, - "bizarre": 13576, - "fraternity": 13577, - "##bre": 13578, - "doubts": 13579, - "icy": 13580, - "cbc": 13581, - "aquatic": 13582, - "sneak": 13583, - "sonny": 13584, - "combines": 13585, - "airports": 13586, - "crude": 13587, - "supervised": 13588, - "spatial": 13589, - "merge": 13590, - "alfonso": 13591, - "##bic": 13592, - "corrupt": 13593, - "scan": 13594, - "undergo": 13595, - "##ams": 13596, - "disabilities": 13597, - "colombian": 13598, - "comparing": 13599, - "dolphins": 13600, - "perkins": 13601, - "##lish": 13602, - "reprinted": 13603, - "unanimous": 13604, - "bounced": 13605, - "hairs": 13606, - "underworld": 13607, - "midwest": 13608, - "semester": 13609, - "bucket": 13610, - "paperback": 13611, - "miniseries": 13612, - "coventry": 13613, - "demise": 13614, - "##leigh": 13615, - "demonstrations": 13616, - "sensor": 13617, - "rotating": 13618, - "yan": 13619, - "##hler": 13620, - "arrange": 13621, - "soils": 13622, - "##idge": 13623, - "hyderabad": 13624, - "labs": 13625, - "##dr": 13626, - "brakes": 13627, - "grandchildren": 13628, - "##nde": 13629, - "negotiated": 13630, - "rover": 13631, - "ferrari": 13632, - "continuation": 13633, - "directorate": 13634, - "augusta": 13635, - "stevenson": 13636, - "counterpart": 13637, - "gore": 13638, - "##rda": 13639, - "nursery": 13640, - "rican": 13641, - "ave": 13642, - "collectively": 13643, - "broadly": 13644, - "pastoral": 13645, - "repertoire": 13646, - "asserted": 13647, - "discovering": 13648, - "nordic": 13649, - "styled": 13650, - "fiba": 13651, - "cunningham": 13652, - "harley": 13653, - "middlesex": 13654, - "survives": 13655, - "tumor": 13656, - "tempo": 13657, - "zack": 13658, - "aiming": 13659, - "lok": 13660, - "urgent": 13661, - "##rade": 13662, - "##nto": 13663, - "devils": 13664, - "##ement": 13665, - "contractor": 13666, - "turin": 13667, - "##wl": 13668, - "##ool": 13669, - "bliss": 13670, - "repaired": 13671, - "simmons": 13672, - "moan": 13673, - "astronomical": 13674, - "cr": 13675, - "negotiate": 13676, - "lyric": 13677, - "1890s": 13678, - "lara": 13679, - "bred": 13680, - "clad": 13681, - "angus": 13682, - "pbs": 13683, - "##ience": 13684, - "engineered": 13685, - "posed": 13686, - "##lk": 13687, - "hernandez": 13688, - "possessions": 13689, - "elbows": 13690, - "psychiatric": 13691, - "strokes": 13692, - "confluence": 13693, - "electorate": 13694, - "lifts": 13695, - "campuses": 13696, - "lava": 13697, - "alps": 13698, - "##ep": 13699, - "##ution": 13700, - "##date": 13701, - "physicist": 13702, - "woody": 13703, - "##page": 13704, - "##ographic": 13705, - "##itis": 13706, - "juliet": 13707, - "reformation": 13708, - "sparhawk": 13709, - "320": 13710, - "complement": 13711, - "suppressed": 13712, - "jewel": 13713, - "##½": 13714, - "floated": 13715, - "##kas": 13716, - "continuity": 13717, - "sadly": 13718, - "##ische": 13719, - "inability": 13720, - "melting": 13721, - "scanning": 13722, - "paula": 13723, - "flour": 13724, - "judaism": 13725, - "safer": 13726, - "vague": 13727, - "##lm": 13728, - "solving": 13729, - "curb": 13730, - "##stown": 13731, - "financially": 13732, - "gable": 13733, - "bees": 13734, - "expired": 13735, - "miserable": 13736, - "cassidy": 13737, - "dominion": 13738, - "1789": 13739, - "cupped": 13740, - "145": 13741, - "robbery": 13742, - "facto": 13743, - "amos": 13744, - "warden": 13745, - "resume": 13746, - "tallest": 13747, - "marvin": 13748, - "ing": 13749, - "pounded": 13750, - "usd": 13751, - "declaring": 13752, - "gasoline": 13753, - 
"##aux": 13754, - "darkened": 13755, - "270": 13756, - "650": 13757, - "sophomore": 13758, - "##mere": 13759, - "erection": 13760, - "gossip": 13761, - "televised": 13762, - "risen": 13763, - "dial": 13764, - "##eu": 13765, - "pillars": 13766, - "##link": 13767, - "passages": 13768, - "profound": 13769, - "##tina": 13770, - "arabian": 13771, - "ashton": 13772, - "silicon": 13773, - "nail": 13774, - "##ead": 13775, - "##lated": 13776, - "##wer": 13777, - "##hardt": 13778, - "fleming": 13779, - "firearms": 13780, - "ducked": 13781, - "circuits": 13782, - "blows": 13783, - "waterloo": 13784, - "titans": 13785, - "##lina": 13786, - "atom": 13787, - "fireplace": 13788, - "cheshire": 13789, - "financed": 13790, - "activation": 13791, - "algorithms": 13792, - "##zzi": 13793, - "constituent": 13794, - "catcher": 13795, - "cherokee": 13796, - "partnerships": 13797, - "sexuality": 13798, - "platoon": 13799, - "tragic": 13800, - "vivian": 13801, - "guarded": 13802, - "whiskey": 13803, - "meditation": 13804, - "poetic": 13805, - "##late": 13806, - "##nga": 13807, - "##ake": 13808, - "porto": 13809, - "listeners": 13810, - "dominance": 13811, - "kendra": 13812, - "mona": 13813, - "chandler": 13814, - "factions": 13815, - "22nd": 13816, - "salisbury": 13817, - "attitudes": 13818, - "derivative": 13819, - "##ido": 13820, - "##haus": 13821, - "intake": 13822, - "paced": 13823, - "javier": 13824, - "illustrator": 13825, - "barrels": 13826, - "bias": 13827, - "cockpit": 13828, - "burnett": 13829, - "dreamed": 13830, - "ensuing": 13831, - "##anda": 13832, - "receptors": 13833, - "someday": 13834, - "hawkins": 13835, - "mattered": 13836, - "##lal": 13837, - "slavic": 13838, - "1799": 13839, - "jesuit": 13840, - "cameroon": 13841, - "wasted": 13842, - "tai": 13843, - "wax": 13844, - "lowering": 13845, - "victorious": 13846, - "freaking": 13847, - "outright": 13848, - "hancock": 13849, - "librarian": 13850, - "sensing": 13851, - "bald": 13852, - "calcium": 13853, - "myers": 13854, - "tablet": 13855, - "announcing": 13856, - "barack": 13857, - "shipyard": 13858, - "pharmaceutical": 13859, - "##uan": 13860, - "greenwich": 13861, - "flush": 13862, - "medley": 13863, - "patches": 13864, - "wolfgang": 13865, - "pt": 13866, - "speeches": 13867, - "acquiring": 13868, - "exams": 13869, - "nikolai": 13870, - "##gg": 13871, - "hayden": 13872, - "kannada": 13873, - "##type": 13874, - "reilly": 13875, - "##pt": 13876, - "waitress": 13877, - "abdomen": 13878, - "devastated": 13879, - "capped": 13880, - "pseudonym": 13881, - "pharmacy": 13882, - "fulfill": 13883, - "paraguay": 13884, - "1796": 13885, - "clicked": 13886, - "##trom": 13887, - "archipelago": 13888, - "syndicated": 13889, - "##hman": 13890, - "lumber": 13891, - "orgasm": 13892, - "rejection": 13893, - "clifford": 13894, - "lorraine": 13895, - "advent": 13896, - "mafia": 13897, - "rodney": 13898, - "brock": 13899, - "##ght": 13900, - "##used": 13901, - "##elia": 13902, - "cassette": 13903, - "chamberlain": 13904, - "despair": 13905, - "mongolia": 13906, - "sensors": 13907, - "developmental": 13908, - "upstream": 13909, - "##eg": 13910, - "##alis": 13911, - "spanning": 13912, - "165": 13913, - "trombone": 13914, - "basque": 13915, - "seeded": 13916, - "interred": 13917, - "renewable": 13918, - "rhys": 13919, - "leapt": 13920, - "revision": 13921, - "molecule": 13922, - "##ages": 13923, - "chord": 13924, - "vicious": 13925, - "nord": 13926, - "shivered": 13927, - "23rd": 13928, - "arlington": 13929, - "debts": 13930, - "corpus": 13931, - "sunrise": 13932, - "bays": 
13933, - "blackburn": 13934, - "centimetres": 13935, - "##uded": 13936, - "shuddered": 13937, - "gm": 13938, - "strangely": 13939, - "gripping": 13940, - "cartoons": 13941, - "isabelle": 13942, - "orbital": 13943, - "##ppa": 13944, - "seals": 13945, - "proving": 13946, - "##lton": 13947, - "refusal": 13948, - "strengthened": 13949, - "bust": 13950, - "assisting": 13951, - "baghdad": 13952, - "batsman": 13953, - "portrayal": 13954, - "mara": 13955, - "pushes": 13956, - "spears": 13957, - "og": 13958, - "##cock": 13959, - "reside": 13960, - "nathaniel": 13961, - "brennan": 13962, - "1776": 13963, - "confirmation": 13964, - "caucus": 13965, - "##worthy": 13966, - "markings": 13967, - "yemen": 13968, - "nobles": 13969, - "ku": 13970, - "lazy": 13971, - "viewer": 13972, - "catalan": 13973, - "encompasses": 13974, - "sawyer": 13975, - "##fall": 13976, - "sparked": 13977, - "substances": 13978, - "patents": 13979, - "braves": 13980, - "arranger": 13981, - "evacuation": 13982, - "sergio": 13983, - "persuade": 13984, - "dover": 13985, - "tolerance": 13986, - "penguin": 13987, - "cum": 13988, - "jockey": 13989, - "insufficient": 13990, - "townships": 13991, - "occupying": 13992, - "declining": 13993, - "plural": 13994, - "processed": 13995, - "projection": 13996, - "puppet": 13997, - "flanders": 13998, - "introduces": 13999, - "liability": 14000, - "##yon": 14001, - "gymnastics": 14002, - "antwerp": 14003, - "taipei": 14004, - "hobart": 14005, - "candles": 14006, - "jeep": 14007, - "wes": 14008, - "observers": 14009, - "126": 14010, - "chaplain": 14011, - "bundle": 14012, - "glorious": 14013, - "##hine": 14014, - "hazel": 14015, - "flung": 14016, - "sol": 14017, - "excavations": 14018, - "dumped": 14019, - "stares": 14020, - "sh": 14021, - "bangalore": 14022, - "triangular": 14023, - "icelandic": 14024, - "intervals": 14025, - "expressing": 14026, - "turbine": 14027, - "##vers": 14028, - "songwriting": 14029, - "crafts": 14030, - "##igo": 14031, - "jasmine": 14032, - "ditch": 14033, - "rite": 14034, - "##ways": 14035, - "entertaining": 14036, - "comply": 14037, - "sorrow": 14038, - "wrestlers": 14039, - "basel": 14040, - "emirates": 14041, - "marian": 14042, - "rivera": 14043, - "helpful": 14044, - "##some": 14045, - "caution": 14046, - "downward": 14047, - "networking": 14048, - "##atory": 14049, - "##tered": 14050, - "darted": 14051, - "genocide": 14052, - "emergence": 14053, - "replies": 14054, - "specializing": 14055, - "spokesman": 14056, - "convenient": 14057, - "unlocked": 14058, - "fading": 14059, - "augustine": 14060, - "concentrations": 14061, - "resemblance": 14062, - "elijah": 14063, - "investigator": 14064, - "andhra": 14065, - "##uda": 14066, - "promotes": 14067, - "bean": 14068, - "##rrell": 14069, - "fleeing": 14070, - "wan": 14071, - "simone": 14072, - "announcer": 14073, - "##ame": 14074, - "##bby": 14075, - "lydia": 14076, - "weaver": 14077, - "132": 14078, - "residency": 14079, - "modification": 14080, - "##fest": 14081, - "stretches": 14082, - "##ast": 14083, - "alternatively": 14084, - "nat": 14085, - "lowe": 14086, - "lacks": 14087, - "##ented": 14088, - "pam": 14089, - "tile": 14090, - "concealed": 14091, - "inferior": 14092, - "abdullah": 14093, - "residences": 14094, - "tissues": 14095, - "vengeance": 14096, - "##ided": 14097, - "moisture": 14098, - "peculiar": 14099, - "groove": 14100, - "zip": 14101, - "bologna": 14102, - "jennings": 14103, - "ninja": 14104, - "oversaw": 14105, - "zombies": 14106, - "pumping": 14107, - "batch": 14108, - "livingston": 14109, - "emerald": 
14110, - "installations": 14111, - "1797": 14112, - "peel": 14113, - "nitrogen": 14114, - "rama": 14115, - "##fying": 14116, - "##star": 14117, - "schooling": 14118, - "strands": 14119, - "responding": 14120, - "werner": 14121, - "##ost": 14122, - "lime": 14123, - "casa": 14124, - "accurately": 14125, - "targeting": 14126, - "##rod": 14127, - "underway": 14128, - "##uru": 14129, - "hemisphere": 14130, - "lester": 14131, - "##yard": 14132, - "occupies": 14133, - "2d": 14134, - "griffith": 14135, - "angrily": 14136, - "reorganized": 14137, - "##owing": 14138, - "courtney": 14139, - "deposited": 14140, - "##dd": 14141, - "##30": 14142, - "estadio": 14143, - "##ifies": 14144, - "dunn": 14145, - "exiled": 14146, - "##ying": 14147, - "checks": 14148, - "##combe": 14149, - "##о": 14150, - "##fly": 14151, - "successes": 14152, - "unexpectedly": 14153, - "blu": 14154, - "assessed": 14155, - "##flower": 14156, - "##ه": 14157, - "observing": 14158, - "sacked": 14159, - "spiders": 14160, - "kn": 14161, - "##tail": 14162, - "mu": 14163, - "nodes": 14164, - "prosperity": 14165, - "audrey": 14166, - "divisional": 14167, - "155": 14168, - "broncos": 14169, - "tangled": 14170, - "adjust": 14171, - "feeds": 14172, - "erosion": 14173, - "paolo": 14174, - "surf": 14175, - "directory": 14176, - "snatched": 14177, - "humid": 14178, - "admiralty": 14179, - "screwed": 14180, - "gt": 14181, - "reddish": 14182, - "##nese": 14183, - "modules": 14184, - "trench": 14185, - "lamps": 14186, - "bind": 14187, - "leah": 14188, - "bucks": 14189, - "competes": 14190, - "##nz": 14191, - "##form": 14192, - "transcription": 14193, - "##uc": 14194, - "isles": 14195, - "violently": 14196, - "clutching": 14197, - "pga": 14198, - "cyclist": 14199, - "inflation": 14200, - "flats": 14201, - "ragged": 14202, - "unnecessary": 14203, - "##hian": 14204, - "stubborn": 14205, - "coordinated": 14206, - "harriet": 14207, - "baba": 14208, - "disqualified": 14209, - "330": 14210, - "insect": 14211, - "wolfe": 14212, - "##fies": 14213, - "reinforcements": 14214, - "rocked": 14215, - "duel": 14216, - "winked": 14217, - "embraced": 14218, - "bricks": 14219, - "##raj": 14220, - "hiatus": 14221, - "defeats": 14222, - "pending": 14223, - "brightly": 14224, - "jealousy": 14225, - "##xton": 14226, - "##hm": 14227, - "##uki": 14228, - "lena": 14229, - "gdp": 14230, - "colorful": 14231, - "##dley": 14232, - "stein": 14233, - "kidney": 14234, - "##shu": 14235, - "underwear": 14236, - "wanderers": 14237, - "##haw": 14238, - "##icus": 14239, - "guardians": 14240, - "m³": 14241, - "roared": 14242, - "habits": 14243, - "##wise": 14244, - "permits": 14245, - "gp": 14246, - "uranium": 14247, - "punished": 14248, - "disguise": 14249, - "bundesliga": 14250, - "elise": 14251, - "dundee": 14252, - "erotic": 14253, - "partisan": 14254, - "pi": 14255, - "collectors": 14256, - "float": 14257, - "individually": 14258, - "rendering": 14259, - "behavioral": 14260, - "bucharest": 14261, - "ser": 14262, - "hare": 14263, - "valerie": 14264, - "corporal": 14265, - "nutrition": 14266, - "proportional": 14267, - "##isa": 14268, - "immense": 14269, - "##kis": 14270, - "pavement": 14271, - "##zie": 14272, - "##eld": 14273, - "sutherland": 14274, - "crouched": 14275, - "1775": 14276, - "##lp": 14277, - "suzuki": 14278, - "trades": 14279, - "endurance": 14280, - "operas": 14281, - "crosby": 14282, - "prayed": 14283, - "priory": 14284, - "rory": 14285, - "socially": 14286, - "##urn": 14287, - "gujarat": 14288, - "##pu": 14289, - "walton": 14290, - "cube": 14291, - "pasha": 14292, 
- "privilege": 14293, - "lennon": 14294, - "floods": 14295, - "thorne": 14296, - "waterfall": 14297, - "nipple": 14298, - "scouting": 14299, - "approve": 14300, - "##lov": 14301, - "minorities": 14302, - "voter": 14303, - "dwight": 14304, - "extensions": 14305, - "assure": 14306, - "ballroom": 14307, - "slap": 14308, - "dripping": 14309, - "privileges": 14310, - "rejoined": 14311, - "confessed": 14312, - "demonstrating": 14313, - "patriotic": 14314, - "yell": 14315, - "investor": 14316, - "##uth": 14317, - "pagan": 14318, - "slumped": 14319, - "squares": 14320, - "##cle": 14321, - "##kins": 14322, - "confront": 14323, - "bert": 14324, - "embarrassment": 14325, - "##aid": 14326, - "aston": 14327, - "urging": 14328, - "sweater": 14329, - "starr": 14330, - "yuri": 14331, - "brains": 14332, - "williamson": 14333, - "commuter": 14334, - "mortar": 14335, - "structured": 14336, - "selfish": 14337, - "exports": 14338, - "##jon": 14339, - "cds": 14340, - "##him": 14341, - "unfinished": 14342, - "##rre": 14343, - "mortgage": 14344, - "destinations": 14345, - "##nagar": 14346, - "canoe": 14347, - "solitary": 14348, - "buchanan": 14349, - "delays": 14350, - "magistrate": 14351, - "fk": 14352, - "##pling": 14353, - "motivation": 14354, - "##lier": 14355, - "##vier": 14356, - "recruiting": 14357, - "assess": 14358, - "##mouth": 14359, - "malik": 14360, - "antique": 14361, - "1791": 14362, - "pius": 14363, - "rahman": 14364, - "reich": 14365, - "tub": 14366, - "zhou": 14367, - "smashed": 14368, - "airs": 14369, - "galway": 14370, - "xii": 14371, - "conditioning": 14372, - "honduras": 14373, - "discharged": 14374, - "dexter": 14375, - "##pf": 14376, - "lionel": 14377, - "129": 14378, - "debates": 14379, - "lemon": 14380, - "tiffany": 14381, - "volunteered": 14382, - "dom": 14383, - "dioxide": 14384, - "procession": 14385, - "devi": 14386, - "sic": 14387, - "tremendous": 14388, - "advertisements": 14389, - "colts": 14390, - "transferring": 14391, - "verdict": 14392, - "hanover": 14393, - "decommissioned": 14394, - "utter": 14395, - "relate": 14396, - "pac": 14397, - "racism": 14398, - "##top": 14399, - "beacon": 14400, - "limp": 14401, - "similarity": 14402, - "terra": 14403, - "occurrence": 14404, - "ant": 14405, - "##how": 14406, - "becky": 14407, - "capt": 14408, - "updates": 14409, - "armament": 14410, - "richie": 14411, - "pal": 14412, - "##graph": 14413, - "halloween": 14414, - "mayo": 14415, - "##ssen": 14416, - "##bone": 14417, - "cara": 14418, - "serena": 14419, - "fcc": 14420, - "dolls": 14421, - "obligations": 14422, - "##dling": 14423, - "violated": 14424, - "lafayette": 14425, - "jakarta": 14426, - "exploitation": 14427, - "##ime": 14428, - "infamous": 14429, - "iconic": 14430, - "##lah": 14431, - "##park": 14432, - "kitty": 14433, - "moody": 14434, - "reginald": 14435, - "dread": 14436, - "spill": 14437, - "crystals": 14438, - "olivier": 14439, - "modeled": 14440, - "bluff": 14441, - "equilibrium": 14442, - "separating": 14443, - "notices": 14444, - "ordnance": 14445, - "extinction": 14446, - "onset": 14447, - "cosmic": 14448, - "attachment": 14449, - "sammy": 14450, - "expose": 14451, - "privy": 14452, - "anchored": 14453, - "##bil": 14454, - "abbott": 14455, - "admits": 14456, - "bending": 14457, - "baritone": 14458, - "emmanuel": 14459, - "policeman": 14460, - "vaughan": 14461, - "winged": 14462, - "climax": 14463, - "dresses": 14464, - "denny": 14465, - "polytechnic": 14466, - "mohamed": 14467, - "burmese": 14468, - "authentic": 14469, - "nikki": 14470, - "genetics": 14471, - 
"grandparents": 14472, - "homestead": 14473, - "gaza": 14474, - "postponed": 14475, - "metacritic": 14476, - "una": 14477, - "##sby": 14478, - "##bat": 14479, - "unstable": 14480, - "dissertation": 14481, - "##rial": 14482, - "##cian": 14483, - "curls": 14484, - "obscure": 14485, - "uncovered": 14486, - "bronx": 14487, - "praying": 14488, - "disappearing": 14489, - "##hoe": 14490, - "prehistoric": 14491, - "coke": 14492, - "turret": 14493, - "mutations": 14494, - "nonprofit": 14495, - "pits": 14496, - "monaco": 14497, - "##ي": 14498, - "##usion": 14499, - "prominently": 14500, - "dispatched": 14501, - "podium": 14502, - "##mir": 14503, - "uci": 14504, - "##uation": 14505, - "133": 14506, - "fortifications": 14507, - "birthplace": 14508, - "kendall": 14509, - "##lby": 14510, - "##oll": 14511, - "preacher": 14512, - "rack": 14513, - "goodman": 14514, - "##rman": 14515, - "persistent": 14516, - "##ott": 14517, - "countless": 14518, - "jaime": 14519, - "recorder": 14520, - "lexington": 14521, - "persecution": 14522, - "jumps": 14523, - "renewal": 14524, - "wagons": 14525, - "##11": 14526, - "crushing": 14527, - "##holder": 14528, - "decorations": 14529, - "##lake": 14530, - "abundance": 14531, - "wrath": 14532, - "laundry": 14533, - "£1": 14534, - "garde": 14535, - "##rp": 14536, - "jeanne": 14537, - "beetles": 14538, - "peasant": 14539, - "##sl": 14540, - "splitting": 14541, - "caste": 14542, - "sergei": 14543, - "##rer": 14544, - "##ema": 14545, - "scripts": 14546, - "##ively": 14547, - "rub": 14548, - "satellites": 14549, - "##vor": 14550, - "inscribed": 14551, - "verlag": 14552, - "scrapped": 14553, - "gale": 14554, - "packages": 14555, - "chick": 14556, - "potato": 14557, - "slogan": 14558, - "kathleen": 14559, - "arabs": 14560, - "##culture": 14561, - "counterparts": 14562, - "reminiscent": 14563, - "choral": 14564, - "##tead": 14565, - "rand": 14566, - "retains": 14567, - "bushes": 14568, - "dane": 14569, - "accomplish": 14570, - "courtesy": 14571, - "closes": 14572, - "##oth": 14573, - "slaughter": 14574, - "hague": 14575, - "krakow": 14576, - "lawson": 14577, - "tailed": 14578, - "elias": 14579, - "ginger": 14580, - "##ttes": 14581, - "canopy": 14582, - "betrayal": 14583, - "rebuilding": 14584, - "turf": 14585, - "##hof": 14586, - "frowning": 14587, - "allegiance": 14588, - "brigades": 14589, - "kicks": 14590, - "rebuild": 14591, - "polls": 14592, - "alias": 14593, - "nationalism": 14594, - "td": 14595, - "rowan": 14596, - "audition": 14597, - "bowie": 14598, - "fortunately": 14599, - "recognizes": 14600, - "harp": 14601, - "dillon": 14602, - "horrified": 14603, - "##oro": 14604, - "renault": 14605, - "##tics": 14606, - "ropes": 14607, - "##α": 14608, - "presumed": 14609, - "rewarded": 14610, - "infrared": 14611, - "wiping": 14612, - "accelerated": 14613, - "illustration": 14614, - "##rid": 14615, - "presses": 14616, - "practitioners": 14617, - "badminton": 14618, - "##iard": 14619, - "detained": 14620, - "##tera": 14621, - "recognizing": 14622, - "relates": 14623, - "misery": 14624, - "##sies": 14625, - "##tly": 14626, - "reproduction": 14627, - "piercing": 14628, - "potatoes": 14629, - "thornton": 14630, - "esther": 14631, - "manners": 14632, - "hbo": 14633, - "##aan": 14634, - "ours": 14635, - "bullshit": 14636, - "ernie": 14637, - "perennial": 14638, - "sensitivity": 14639, - "illuminated": 14640, - "rupert": 14641, - "##jin": 14642, - "##iss": 14643, - "##ear": 14644, - "rfc": 14645, - "nassau": 14646, - "##dock": 14647, - "staggered": 14648, - "socialism": 14649, - "##haven": 
14650, - "appointments": 14651, - "nonsense": 14652, - "prestige": 14653, - "sharma": 14654, - "haul": 14655, - "##tical": 14656, - "solidarity": 14657, - "gps": 14658, - "##ook": 14659, - "##rata": 14660, - "igor": 14661, - "pedestrian": 14662, - "##uit": 14663, - "baxter": 14664, - "tenants": 14665, - "wires": 14666, - "medication": 14667, - "unlimited": 14668, - "guiding": 14669, - "impacts": 14670, - "diabetes": 14671, - "##rama": 14672, - "sasha": 14673, - "pas": 14674, - "clive": 14675, - "extraction": 14676, - "131": 14677, - "continually": 14678, - "constraints": 14679, - "##bilities": 14680, - "sonata": 14681, - "hunted": 14682, - "sixteenth": 14683, - "chu": 14684, - "planting": 14685, - "quote": 14686, - "mayer": 14687, - "pretended": 14688, - "abs": 14689, - "spat": 14690, - "##hua": 14691, - "ceramic": 14692, - "##cci": 14693, - "curtains": 14694, - "pigs": 14695, - "pitching": 14696, - "##dad": 14697, - "latvian": 14698, - "sore": 14699, - "dayton": 14700, - "##sted": 14701, - "##qi": 14702, - "patrols": 14703, - "slice": 14704, - "playground": 14705, - "##nted": 14706, - "shone": 14707, - "stool": 14708, - "apparatus": 14709, - "inadequate": 14710, - "mates": 14711, - "treason": 14712, - "##ija": 14713, - "desires": 14714, - "##liga": 14715, - "##croft": 14716, - "somalia": 14717, - "laurent": 14718, - "mir": 14719, - "leonardo": 14720, - "oracle": 14721, - "grape": 14722, - "obliged": 14723, - "chevrolet": 14724, - "thirteenth": 14725, - "stunning": 14726, - "enthusiastic": 14727, - "##ede": 14728, - "accounted": 14729, - "concludes": 14730, - "currents": 14731, - "basil": 14732, - "##kovic": 14733, - "drought": 14734, - "##rica": 14735, - "mai": 14736, - "##aire": 14737, - "shove": 14738, - "posting": 14739, - "##shed": 14740, - "pilgrimage": 14741, - "humorous": 14742, - "packing": 14743, - "fry": 14744, - "pencil": 14745, - "wines": 14746, - "smells": 14747, - "144": 14748, - "marilyn": 14749, - "aching": 14750, - "newest": 14751, - "clung": 14752, - "bon": 14753, - "neighbours": 14754, - "sanctioned": 14755, - "##pie": 14756, - "mug": 14757, - "##stock": 14758, - "drowning": 14759, - "##mma": 14760, - "hydraulic": 14761, - "##vil": 14762, - "hiring": 14763, - "reminder": 14764, - "lilly": 14765, - "investigators": 14766, - "##ncies": 14767, - "sour": 14768, - "##eous": 14769, - "compulsory": 14770, - "packet": 14771, - "##rion": 14772, - "##graphic": 14773, - "##elle": 14774, - "cannes": 14775, - "##inate": 14776, - "depressed": 14777, - "##rit": 14778, - "heroic": 14779, - "importantly": 14780, - "theresa": 14781, - "##tled": 14782, - "conway": 14783, - "saturn": 14784, - "marginal": 14785, - "rae": 14786, - "##xia": 14787, - "corresponds": 14788, - "royce": 14789, - "pact": 14790, - "jasper": 14791, - "explosives": 14792, - "packaging": 14793, - "aluminium": 14794, - "##ttered": 14795, - "denotes": 14796, - "rhythmic": 14797, - "spans": 14798, - "assignments": 14799, - "hereditary": 14800, - "outlined": 14801, - "originating": 14802, - "sundays": 14803, - "lad": 14804, - "reissued": 14805, - "greeting": 14806, - "beatrice": 14807, - "##dic": 14808, - "pillar": 14809, - "marcos": 14810, - "plots": 14811, - "handbook": 14812, - "alcoholic": 14813, - "judiciary": 14814, - "avant": 14815, - "slides": 14816, - "extract": 14817, - "masculine": 14818, - "blur": 14819, - "##eum": 14820, - "##force": 14821, - "homage": 14822, - "trembled": 14823, - "owens": 14824, - "hymn": 14825, - "trey": 14826, - "omega": 14827, - "signaling": 14828, - "socks": 14829, - "accumulated": 
14830, - "reacted": 14831, - "attic": 14832, - "theo": 14833, - "lining": 14834, - "angie": 14835, - "distraction": 14836, - "primera": 14837, - "talbot": 14838, - "##key": 14839, - "1200": 14840, - "ti": 14841, - "creativity": 14842, - "billed": 14843, - "##hey": 14844, - "deacon": 14845, - "eduardo": 14846, - "identifies": 14847, - "proposition": 14848, - "dizzy": 14849, - "gunner": 14850, - "hogan": 14851, - "##yam": 14852, - "##pping": 14853, - "##hol": 14854, - "ja": 14855, - "##chan": 14856, - "jensen": 14857, - "reconstructed": 14858, - "##berger": 14859, - "clearance": 14860, - "darius": 14861, - "##nier": 14862, - "abe": 14863, - "harlem": 14864, - "plea": 14865, - "dei": 14866, - "circled": 14867, - "emotionally": 14868, - "notation": 14869, - "fascist": 14870, - "neville": 14871, - "exceeded": 14872, - "upwards": 14873, - "viable": 14874, - "ducks": 14875, - "##fo": 14876, - "workforce": 14877, - "racer": 14878, - "limiting": 14879, - "shri": 14880, - "##lson": 14881, - "possesses": 14882, - "1600": 14883, - "kerr": 14884, - "moths": 14885, - "devastating": 14886, - "laden": 14887, - "disturbing": 14888, - "locking": 14889, - "##cture": 14890, - "gal": 14891, - "fearing": 14892, - "accreditation": 14893, - "flavor": 14894, - "aide": 14895, - "1870s": 14896, - "mountainous": 14897, - "##baum": 14898, - "melt": 14899, - "##ures": 14900, - "motel": 14901, - "texture": 14902, - "servers": 14903, - "soda": 14904, - "##mb": 14905, - "herd": 14906, - "##nium": 14907, - "erect": 14908, - "puzzled": 14909, - "hum": 14910, - "peggy": 14911, - "examinations": 14912, - "gould": 14913, - "testified": 14914, - "geoff": 14915, - "ren": 14916, - "devised": 14917, - "sacks": 14918, - "##law": 14919, - "denial": 14920, - "posters": 14921, - "grunted": 14922, - "cesar": 14923, - "tutor": 14924, - "ec": 14925, - "gerry": 14926, - "offerings": 14927, - "byrne": 14928, - "falcons": 14929, - "combinations": 14930, - "ct": 14931, - "incoming": 14932, - "pardon": 14933, - "rocking": 14934, - "26th": 14935, - "avengers": 14936, - "flared": 14937, - "mankind": 14938, - "seller": 14939, - "uttar": 14940, - "loch": 14941, - "nadia": 14942, - "stroking": 14943, - "exposing": 14944, - "##hd": 14945, - "fertile": 14946, - "ancestral": 14947, - "instituted": 14948, - "##has": 14949, - "noises": 14950, - "prophecy": 14951, - "taxation": 14952, - "eminent": 14953, - "vivid": 14954, - "pol": 14955, - "##bol": 14956, - "dart": 14957, - "indirect": 14958, - "multimedia": 14959, - "notebook": 14960, - "upside": 14961, - "displaying": 14962, - "adrenaline": 14963, - "referenced": 14964, - "geometric": 14965, - "##iving": 14966, - "progression": 14967, - "##ddy": 14968, - "blunt": 14969, - "announce": 14970, - "##far": 14971, - "implementing": 14972, - "##lav": 14973, - "aggression": 14974, - "liaison": 14975, - "cooler": 14976, - "cares": 14977, - "headache": 14978, - "plantations": 14979, - "gorge": 14980, - "dots": 14981, - "impulse": 14982, - "thickness": 14983, - "ashamed": 14984, - "averaging": 14985, - "kathy": 14986, - "obligation": 14987, - "precursor": 14988, - "137": 14989, - "fowler": 14990, - "symmetry": 14991, - "thee": 14992, - "225": 14993, - "hears": 14994, - "##rai": 14995, - "undergoing": 14996, - "ads": 14997, - "butcher": 14998, - "bowler": 14999, - "##lip": 15000, - "cigarettes": 15001, - "subscription": 15002, - "goodness": 15003, - "##ically": 15004, - "browne": 15005, - "##hos": 15006, - "##tech": 15007, - "kyoto": 15008, - "donor": 15009, - "##erty": 15010, - "damaging": 15011, - "friction": 
15012, - "drifting": 15013, - "expeditions": 15014, - "hardened": 15015, - "prostitution": 15016, - "152": 15017, - "fauna": 15018, - "blankets": 15019, - "claw": 15020, - "tossing": 15021, - "snarled": 15022, - "butterflies": 15023, - "recruits": 15024, - "investigative": 15025, - "coated": 15026, - "healed": 15027, - "138": 15028, - "communal": 15029, - "hai": 15030, - "xiii": 15031, - "academics": 15032, - "boone": 15033, - "psychologist": 15034, - "restless": 15035, - "lahore": 15036, - "stephens": 15037, - "mba": 15038, - "brendan": 15039, - "foreigners": 15040, - "printer": 15041, - "##pc": 15042, - "ached": 15043, - "explode": 15044, - "27th": 15045, - "deed": 15046, - "scratched": 15047, - "dared": 15048, - "##pole": 15049, - "cardiac": 15050, - "1780": 15051, - "okinawa": 15052, - "proto": 15053, - "commando": 15054, - "compelled": 15055, - "oddly": 15056, - "electrons": 15057, - "##base": 15058, - "replica": 15059, - "thanksgiving": 15060, - "##rist": 15061, - "sheila": 15062, - "deliberate": 15063, - "stafford": 15064, - "tidal": 15065, - "representations": 15066, - "hercules": 15067, - "ou": 15068, - "##path": 15069, - "##iated": 15070, - "kidnapping": 15071, - "lenses": 15072, - "##tling": 15073, - "deficit": 15074, - "samoa": 15075, - "mouths": 15076, - "consuming": 15077, - "computational": 15078, - "maze": 15079, - "granting": 15080, - "smirk": 15081, - "razor": 15082, - "fixture": 15083, - "ideals": 15084, - "inviting": 15085, - "aiden": 15086, - "nominal": 15087, - "##vs": 15088, - "issuing": 15089, - "julio": 15090, - "pitt": 15091, - "ramsey": 15092, - "docks": 15093, - "##oss": 15094, - "exhaust": 15095, - "##owed": 15096, - "bavarian": 15097, - "draped": 15098, - "anterior": 15099, - "mating": 15100, - "ethiopian": 15101, - "explores": 15102, - "noticing": 15103, - "##nton": 15104, - "discarded": 15105, - "convenience": 15106, - "hoffman": 15107, - "endowment": 15108, - "beasts": 15109, - "cartridge": 15110, - "mormon": 15111, - "paternal": 15112, - "probe": 15113, - "sleeves": 15114, - "interfere": 15115, - "lump": 15116, - "deadline": 15117, - "##rail": 15118, - "jenks": 15119, - "bulldogs": 15120, - "scrap": 15121, - "alternating": 15122, - "justified": 15123, - "reproductive": 15124, - "nam": 15125, - "seize": 15126, - "descending": 15127, - "secretariat": 15128, - "kirby": 15129, - "coupe": 15130, - "grouped": 15131, - "smash": 15132, - "panther": 15133, - "sedan": 15134, - "tapping": 15135, - "##18": 15136, - "lola": 15137, - "cheer": 15138, - "germanic": 15139, - "unfortunate": 15140, - "##eter": 15141, - "unrelated": 15142, - "##fan": 15143, - "subordinate": 15144, - "##sdale": 15145, - "suzanne": 15146, - "advertisement": 15147, - "##ility": 15148, - "horsepower": 15149, - "##lda": 15150, - "cautiously": 15151, - "discourse": 15152, - "luigi": 15153, - "##mans": 15154, - "##fields": 15155, - "noun": 15156, - "prevalent": 15157, - "mao": 15158, - "schneider": 15159, - "everett": 15160, - "surround": 15161, - "governorate": 15162, - "kira": 15163, - "##avia": 15164, - "westward": 15165, - "##take": 15166, - "misty": 15167, - "rails": 15168, - "sustainability": 15169, - "134": 15170, - "unused": 15171, - "##rating": 15172, - "packs": 15173, - "toast": 15174, - "unwilling": 15175, - "regulate": 15176, - "thy": 15177, - "suffrage": 15178, - "nile": 15179, - "awe": 15180, - "assam": 15181, - "definitions": 15182, - "travelers": 15183, - "affordable": 15184, - "##rb": 15185, - "conferred": 15186, - "sells": 15187, - "undefeated": 15188, - "beneficial": 15189, - 
"torso": 15190, - "basal": 15191, - "repeating": 15192, - "remixes": 15193, - "##pass": 15194, - "bahrain": 15195, - "cables": 15196, - "fang": 15197, - "##itated": 15198, - "excavated": 15199, - "numbering": 15200, - "statutory": 15201, - "##rey": 15202, - "deluxe": 15203, - "##lian": 15204, - "forested": 15205, - "ramirez": 15206, - "derbyshire": 15207, - "zeus": 15208, - "slamming": 15209, - "transfers": 15210, - "astronomer": 15211, - "banana": 15212, - "lottery": 15213, - "berg": 15214, - "histories": 15215, - "bamboo": 15216, - "##uchi": 15217, - "resurrection": 15218, - "posterior": 15219, - "bowls": 15220, - "vaguely": 15221, - "##thi": 15222, - "thou": 15223, - "preserving": 15224, - "tensed": 15225, - "offence": 15226, - "##inas": 15227, - "meyrick": 15228, - "callum": 15229, - "ridden": 15230, - "watt": 15231, - "langdon": 15232, - "tying": 15233, - "lowland": 15234, - "snorted": 15235, - "daring": 15236, - "truman": 15237, - "##hale": 15238, - "##girl": 15239, - "aura": 15240, - "overly": 15241, - "filing": 15242, - "weighing": 15243, - "goa": 15244, - "infections": 15245, - "philanthropist": 15246, - "saunders": 15247, - "eponymous": 15248, - "##owski": 15249, - "latitude": 15250, - "perspectives": 15251, - "reviewing": 15252, - "mets": 15253, - "commandant": 15254, - "radial": 15255, - "##kha": 15256, - "flashlight": 15257, - "reliability": 15258, - "koch": 15259, - "vowels": 15260, - "amazed": 15261, - "ada": 15262, - "elaine": 15263, - "supper": 15264, - "##rth": 15265, - "##encies": 15266, - "predator": 15267, - "debated": 15268, - "soviets": 15269, - "cola": 15270, - "##boards": 15271, - "##nah": 15272, - "compartment": 15273, - "crooked": 15274, - "arbitrary": 15275, - "fourteenth": 15276, - "##ctive": 15277, - "havana": 15278, - "majors": 15279, - "steelers": 15280, - "clips": 15281, - "profitable": 15282, - "ambush": 15283, - "exited": 15284, - "packers": 15285, - "##tile": 15286, - "nude": 15287, - "cracks": 15288, - "fungi": 15289, - "##е": 15290, - "limb": 15291, - "trousers": 15292, - "josie": 15293, - "shelby": 15294, - "tens": 15295, - "frederic": 15296, - "##ος": 15297, - "definite": 15298, - "smoothly": 15299, - "constellation": 15300, - "insult": 15301, - "baton": 15302, - "discs": 15303, - "lingering": 15304, - "##nco": 15305, - "conclusions": 15306, - "lent": 15307, - "staging": 15308, - "becker": 15309, - "grandpa": 15310, - "shaky": 15311, - "##tron": 15312, - "einstein": 15313, - "obstacles": 15314, - "sk": 15315, - "adverse": 15316, - "elle": 15317, - "economically": 15318, - "##moto": 15319, - "mccartney": 15320, - "thor": 15321, - "dismissal": 15322, - "motions": 15323, - "readings": 15324, - "nostrils": 15325, - "treatise": 15326, - "##pace": 15327, - "squeezing": 15328, - "evidently": 15329, - "prolonged": 15330, - "1783": 15331, - "venezuelan": 15332, - "je": 15333, - "marguerite": 15334, - "beirut": 15335, - "takeover": 15336, - "shareholders": 15337, - "##vent": 15338, - "denise": 15339, - "digit": 15340, - "airplay": 15341, - "norse": 15342, - "##bbling": 15343, - "imaginary": 15344, - "pills": 15345, - "hubert": 15346, - "blaze": 15347, - "vacated": 15348, - "eliminating": 15349, - "##ello": 15350, - "vine": 15351, - "mansfield": 15352, - "##tty": 15353, - "retrospective": 15354, - "barrow": 15355, - "borne": 15356, - "clutch": 15357, - "bail": 15358, - "forensic": 15359, - "weaving": 15360, - "##nett": 15361, - "##witz": 15362, - "desktop": 15363, - "citadel": 15364, - "promotions": 15365, - "worrying": 15366, - "dorset": 15367, - "ieee": 
15368, - "subdivided": 15369, - "##iating": 15370, - "manned": 15371, - "expeditionary": 15372, - "pickup": 15373, - "synod": 15374, - "chuckle": 15375, - "185": 15376, - "barney": 15377, - "##rz": 15378, - "##ffin": 15379, - "functionality": 15380, - "karachi": 15381, - "litigation": 15382, - "meanings": 15383, - "uc": 15384, - "lick": 15385, - "turbo": 15386, - "anders": 15387, - "##ffed": 15388, - "execute": 15389, - "curl": 15390, - "oppose": 15391, - "ankles": 15392, - "typhoon": 15393, - "##د": 15394, - "##ache": 15395, - "##asia": 15396, - "linguistics": 15397, - "compassion": 15398, - "pressures": 15399, - "grazing": 15400, - "perfection": 15401, - "##iting": 15402, - "immunity": 15403, - "monopoly": 15404, - "muddy": 15405, - "backgrounds": 15406, - "136": 15407, - "namibia": 15408, - "francesca": 15409, - "monitors": 15410, - "attracting": 15411, - "stunt": 15412, - "tuition": 15413, - "##ии": 15414, - "vegetable": 15415, - "##mates": 15416, - "##quent": 15417, - "mgm": 15418, - "jen": 15419, - "complexes": 15420, - "forts": 15421, - "##ond": 15422, - "cellar": 15423, - "bites": 15424, - "seventeenth": 15425, - "royals": 15426, - "flemish": 15427, - "failures": 15428, - "mast": 15429, - "charities": 15430, - "##cular": 15431, - "peruvian": 15432, - "capitals": 15433, - "macmillan": 15434, - "ipswich": 15435, - "outward": 15436, - "frigate": 15437, - "postgraduate": 15438, - "folds": 15439, - "employing": 15440, - "##ouse": 15441, - "concurrently": 15442, - "fiery": 15443, - "##tai": 15444, - "contingent": 15445, - "nightmares": 15446, - "monumental": 15447, - "nicaragua": 15448, - "##kowski": 15449, - "lizard": 15450, - "mal": 15451, - "fielding": 15452, - "gig": 15453, - "reject": 15454, - "##pad": 15455, - "harding": 15456, - "##ipe": 15457, - "coastline": 15458, - "##cin": 15459, - "##nos": 15460, - "beethoven": 15461, - "humphrey": 15462, - "innovations": 15463, - "##tam": 15464, - "##nge": 15465, - "norris": 15466, - "doris": 15467, - "solicitor": 15468, - "huang": 15469, - "obey": 15470, - "141": 15471, - "##lc": 15472, - "niagara": 15473, - "##tton": 15474, - "shelves": 15475, - "aug": 15476, - "bourbon": 15477, - "curry": 15478, - "nightclub": 15479, - "specifications": 15480, - "hilton": 15481, - "##ndo": 15482, - "centennial": 15483, - "dispersed": 15484, - "worm": 15485, - "neglected": 15486, - "briggs": 15487, - "sm": 15488, - "font": 15489, - "kuala": 15490, - "uneasy": 15491, - "plc": 15492, - "##nstein": 15493, - "##bound": 15494, - "##aking": 15495, - "##burgh": 15496, - "awaiting": 15497, - "pronunciation": 15498, - "##bbed": 15499, - "##quest": 15500, - "eh": 15501, - "optimal": 15502, - "zhu": 15503, - "raped": 15504, - "greens": 15505, - "presided": 15506, - "brenda": 15507, - "worries": 15508, - "##life": 15509, - "venetian": 15510, - "marxist": 15511, - "turnout": 15512, - "##lius": 15513, - "refined": 15514, - "braced": 15515, - "sins": 15516, - "grasped": 15517, - "sunderland": 15518, - "nickel": 15519, - "speculated": 15520, - "lowell": 15521, - "cyrillic": 15522, - "communism": 15523, - "fundraising": 15524, - "resembling": 15525, - "colonists": 15526, - "mutant": 15527, - "freddie": 15528, - "usc": 15529, - "##mos": 15530, - "gratitude": 15531, - "##run": 15532, - "mural": 15533, - "##lous": 15534, - "chemist": 15535, - "wi": 15536, - "reminds": 15537, - "28th": 15538, - "steals": 15539, - "tess": 15540, - "pietro": 15541, - "##ingen": 15542, - "promoter": 15543, - "ri": 15544, - "microphone": 15545, - "honoured": 15546, - "rai": 15547, - "sant": 15548, 
- "##qui": 15549, - "feather": 15550, - "##nson": 15551, - "burlington": 15552, - "kurdish": 15553, - "terrorists": 15554, - "deborah": 15555, - "sickness": 15556, - "##wed": 15557, - "##eet": 15558, - "hazard": 15559, - "irritated": 15560, - "desperation": 15561, - "veil": 15562, - "clarity": 15563, - "##rik": 15564, - "jewels": 15565, - "xv": 15566, - "##gged": 15567, - "##ows": 15568, - "##cup": 15569, - "berkshire": 15570, - "unfair": 15571, - "mysteries": 15572, - "orchid": 15573, - "winced": 15574, - "exhaustion": 15575, - "renovations": 15576, - "stranded": 15577, - "obe": 15578, - "infinity": 15579, - "##nies": 15580, - "adapt": 15581, - "redevelopment": 15582, - "thanked": 15583, - "registry": 15584, - "olga": 15585, - "domingo": 15586, - "noir": 15587, - "tudor": 15588, - "ole": 15589, - "##atus": 15590, - "commenting": 15591, - "behaviors": 15592, - "##ais": 15593, - "crisp": 15594, - "pauline": 15595, - "probable": 15596, - "stirling": 15597, - "wigan": 15598, - "##bian": 15599, - "paralympics": 15600, - "panting": 15601, - "surpassed": 15602, - "##rew": 15603, - "luca": 15604, - "barred": 15605, - "pony": 15606, - "famed": 15607, - "##sters": 15608, - "cassandra": 15609, - "waiter": 15610, - "carolyn": 15611, - "exported": 15612, - "##orted": 15613, - "andres": 15614, - "destructive": 15615, - "deeds": 15616, - "jonah": 15617, - "castles": 15618, - "vacancy": 15619, - "suv": 15620, - "##glass": 15621, - "1788": 15622, - "orchard": 15623, - "yep": 15624, - "famine": 15625, - "belarusian": 15626, - "sprang": 15627, - "##forth": 15628, - "skinny": 15629, - "##mis": 15630, - "administrators": 15631, - "rotterdam": 15632, - "zambia": 15633, - "zhao": 15634, - "boiler": 15635, - "discoveries": 15636, - "##ride": 15637, - "##physics": 15638, - "lucius": 15639, - "disappointing": 15640, - "outreach": 15641, - "spoon": 15642, - "##frame": 15643, - "qualifications": 15644, - "unanimously": 15645, - "enjoys": 15646, - "regency": 15647, - "##iidae": 15648, - "stade": 15649, - "realism": 15650, - "veterinary": 15651, - "rodgers": 15652, - "dump": 15653, - "alain": 15654, - "chestnut": 15655, - "castile": 15656, - "censorship": 15657, - "rumble": 15658, - "gibbs": 15659, - "##itor": 15660, - "communion": 15661, - "reggae": 15662, - "inactivated": 15663, - "logs": 15664, - "loads": 15665, - "##houses": 15666, - "homosexual": 15667, - "##iano": 15668, - "ale": 15669, - "informs": 15670, - "##cas": 15671, - "phrases": 15672, - "plaster": 15673, - "linebacker": 15674, - "ambrose": 15675, - "kaiser": 15676, - "fascinated": 15677, - "850": 15678, - "limerick": 15679, - "recruitment": 15680, - "forge": 15681, - "mastered": 15682, - "##nding": 15683, - "leinster": 15684, - "rooted": 15685, - "threaten": 15686, - "##strom": 15687, - "borneo": 15688, - "##hes": 15689, - "suggestions": 15690, - "scholarships": 15691, - "propeller": 15692, - "documentaries": 15693, - "patronage": 15694, - "coats": 15695, - "constructing": 15696, - "invest": 15697, - "neurons": 15698, - "comet": 15699, - "entirety": 15700, - "shouts": 15701, - "identities": 15702, - "annoying": 15703, - "unchanged": 15704, - "wary": 15705, - "##antly": 15706, - "##ogy": 15707, - "neat": 15708, - "oversight": 15709, - "##kos": 15710, - "phillies": 15711, - "replay": 15712, - "constance": 15713, - "##kka": 15714, - "incarnation": 15715, - "humble": 15716, - "skies": 15717, - "minus": 15718, - "##acy": 15719, - "smithsonian": 15720, - "##chel": 15721, - "guerrilla": 15722, - "jar": 15723, - "cadets": 15724, - "##plate": 15725, - "surplus": 
15726, - "audit": 15727, - "##aru": 15728, - "cracking": 15729, - "joanna": 15730, - "louisa": 15731, - "pacing": 15732, - "##lights": 15733, - "intentionally": 15734, - "##iri": 15735, - "diner": 15736, - "nwa": 15737, - "imprint": 15738, - "australians": 15739, - "tong": 15740, - "unprecedented": 15741, - "bunker": 15742, - "naive": 15743, - "specialists": 15744, - "ark": 15745, - "nichols": 15746, - "railing": 15747, - "leaked": 15748, - "pedal": 15749, - "##uka": 15750, - "shrub": 15751, - "longing": 15752, - "roofs": 15753, - "v8": 15754, - "captains": 15755, - "neural": 15756, - "tuned": 15757, - "##ntal": 15758, - "##jet": 15759, - "emission": 15760, - "medina": 15761, - "frantic": 15762, - "codex": 15763, - "definitive": 15764, - "sid": 15765, - "abolition": 15766, - "intensified": 15767, - "stocks": 15768, - "enrique": 15769, - "sustain": 15770, - "genoa": 15771, - "oxide": 15772, - "##written": 15773, - "clues": 15774, - "cha": 15775, - "##gers": 15776, - "tributaries": 15777, - "fragment": 15778, - "venom": 15779, - "##rity": 15780, - "##ente": 15781, - "##sca": 15782, - "muffled": 15783, - "vain": 15784, - "sire": 15785, - "laos": 15786, - "##ingly": 15787, - "##hana": 15788, - "hastily": 15789, - "snapping": 15790, - "surfaced": 15791, - "sentiment": 15792, - "motive": 15793, - "##oft": 15794, - "contests": 15795, - "approximate": 15796, - "mesa": 15797, - "luckily": 15798, - "dinosaur": 15799, - "exchanges": 15800, - "propelled": 15801, - "accord": 15802, - "bourne": 15803, - "relieve": 15804, - "tow": 15805, - "masks": 15806, - "offended": 15807, - "##ues": 15808, - "cynthia": 15809, - "##mmer": 15810, - "rains": 15811, - "bartender": 15812, - "zinc": 15813, - "reviewers": 15814, - "lois": 15815, - "##sai": 15816, - "legged": 15817, - "arrogant": 15818, - "rafe": 15819, - "rosie": 15820, - "comprise": 15821, - "handicap": 15822, - "blockade": 15823, - "inlet": 15824, - "lagoon": 15825, - "copied": 15826, - "drilling": 15827, - "shelley": 15828, - "petals": 15829, - "##inian": 15830, - "mandarin": 15831, - "obsolete": 15832, - "##inated": 15833, - "onward": 15834, - "arguably": 15835, - "productivity": 15836, - "cindy": 15837, - "praising": 15838, - "seldom": 15839, - "busch": 15840, - "discusses": 15841, - "raleigh": 15842, - "shortage": 15843, - "ranged": 15844, - "stanton": 15845, - "encouragement": 15846, - "firstly": 15847, - "conceded": 15848, - "overs": 15849, - "temporal": 15850, - "##uke": 15851, - "cbe": 15852, - "##bos": 15853, - "woo": 15854, - "certainty": 15855, - "pumps": 15856, - "##pton": 15857, - "stalked": 15858, - "##uli": 15859, - "lizzie": 15860, - "periodic": 15861, - "thieves": 15862, - "weaker": 15863, - "##night": 15864, - "gases": 15865, - "shoving": 15866, - "chooses": 15867, - "wc": 15868, - "##chemical": 15869, - "prompting": 15870, - "weights": 15871, - "##kill": 15872, - "robust": 15873, - "flanked": 15874, - "sticky": 15875, - "hu": 15876, - "tuberculosis": 15877, - "##eb": 15878, - "##eal": 15879, - "christchurch": 15880, - "resembled": 15881, - "wallet": 15882, - "reese": 15883, - "inappropriate": 15884, - "pictured": 15885, - "distract": 15886, - "fixing": 15887, - "fiddle": 15888, - "giggled": 15889, - "burger": 15890, - "heirs": 15891, - "hairy": 15892, - "mechanic": 15893, - "torque": 15894, - "apache": 15895, - "obsessed": 15896, - "chiefly": 15897, - "cheng": 15898, - "logging": 15899, - "##tag": 15900, - "extracted": 15901, - "meaningful": 15902, - "numb": 15903, - "##vsky": 15904, - "gloucestershire": 15905, - "reminding": 15906, - 
"##bay": 15907, - "unite": 15908, - "##lit": 15909, - "breeds": 15910, - "diminished": 15911, - "clown": 15912, - "glove": 15913, - "1860s": 15914, - "##ن": 15915, - "##ug": 15916, - "archibald": 15917, - "focal": 15918, - "freelance": 15919, - "sliced": 15920, - "depiction": 15921, - "##yk": 15922, - "organism": 15923, - "switches": 15924, - "sights": 15925, - "stray": 15926, - "crawling": 15927, - "##ril": 15928, - "lever": 15929, - "leningrad": 15930, - "interpretations": 15931, - "loops": 15932, - "anytime": 15933, - "reel": 15934, - "alicia": 15935, - "delighted": 15936, - "##ech": 15937, - "inhaled": 15938, - "xiv": 15939, - "suitcase": 15940, - "bernie": 15941, - "vega": 15942, - "licenses": 15943, - "northampton": 15944, - "exclusion": 15945, - "induction": 15946, - "monasteries": 15947, - "racecourse": 15948, - "homosexuality": 15949, - "##right": 15950, - "##sfield": 15951, - "##rky": 15952, - "dimitri": 15953, - "michele": 15954, - "alternatives": 15955, - "ions": 15956, - "commentators": 15957, - "genuinely": 15958, - "objected": 15959, - "pork": 15960, - "hospitality": 15961, - "fencing": 15962, - "stephan": 15963, - "warships": 15964, - "peripheral": 15965, - "wit": 15966, - "drunken": 15967, - "wrinkled": 15968, - "quentin": 15969, - "spends": 15970, - "departing": 15971, - "chung": 15972, - "numerical": 15973, - "spokesperson": 15974, - "##zone": 15975, - "johannesburg": 15976, - "caliber": 15977, - "killers": 15978, - "##udge": 15979, - "assumes": 15980, - "neatly": 15981, - "demographic": 15982, - "abigail": 15983, - "bloc": 15984, - "##vel": 15985, - "mounting": 15986, - "##lain": 15987, - "bentley": 15988, - "slightest": 15989, - "xu": 15990, - "recipients": 15991, - "##jk": 15992, - "merlin": 15993, - "##writer": 15994, - "seniors": 15995, - "prisons": 15996, - "blinking": 15997, - "hindwings": 15998, - "flickered": 15999, - "kappa": 16000, - "##hel": 16001, - "80s": 16002, - "strengthening": 16003, - "appealing": 16004, - "brewing": 16005, - "gypsy": 16006, - "mali": 16007, - "lashes": 16008, - "hulk": 16009, - "unpleasant": 16010, - "harassment": 16011, - "bio": 16012, - "treaties": 16013, - "predict": 16014, - "instrumentation": 16015, - "pulp": 16016, - "troupe": 16017, - "boiling": 16018, - "mantle": 16019, - "##ffe": 16020, - "ins": 16021, - "##vn": 16022, - "dividing": 16023, - "handles": 16024, - "verbs": 16025, - "##onal": 16026, - "coconut": 16027, - "senegal": 16028, - "340": 16029, - "thorough": 16030, - "gum": 16031, - "momentarily": 16032, - "##sto": 16033, - "cocaine": 16034, - "panicked": 16035, - "destined": 16036, - "##turing": 16037, - "teatro": 16038, - "denying": 16039, - "weary": 16040, - "captained": 16041, - "mans": 16042, - "##hawks": 16043, - "##code": 16044, - "wakefield": 16045, - "bollywood": 16046, - "thankfully": 16047, - "##16": 16048, - "cyril": 16049, - "##wu": 16050, - "amendments": 16051, - "##bahn": 16052, - "consultation": 16053, - "stud": 16054, - "reflections": 16055, - "kindness": 16056, - "1787": 16057, - "internally": 16058, - "##ovo": 16059, - "tex": 16060, - "mosaic": 16061, - "distribute": 16062, - "paddy": 16063, - "seeming": 16064, - "143": 16065, - "##hic": 16066, - "piers": 16067, - "##15": 16068, - "##mura": 16069, - "##verse": 16070, - "popularly": 16071, - "winger": 16072, - "kang": 16073, - "sentinel": 16074, - "mccoy": 16075, - "##anza": 16076, - "covenant": 16077, - "##bag": 16078, - "verge": 16079, - "fireworks": 16080, - "suppress": 16081, - "thrilled": 16082, - "dominate": 16083, - "##jar": 16084, - "swansea": 
16085, - "##60": 16086, - "142": 16087, - "reconciliation": 16088, - "##ndi": 16089, - "stiffened": 16090, - "cue": 16091, - "dorian": 16092, - "##uf": 16093, - "damascus": 16094, - "amor": 16095, - "ida": 16096, - "foremost": 16097, - "##aga": 16098, - "porsche": 16099, - "unseen": 16100, - "dir": 16101, - "##had": 16102, - "##azi": 16103, - "stony": 16104, - "lexi": 16105, - "melodies": 16106, - "##nko": 16107, - "angular": 16108, - "integer": 16109, - "podcast": 16110, - "ants": 16111, - "inherent": 16112, - "jaws": 16113, - "justify": 16114, - "persona": 16115, - "##olved": 16116, - "josephine": 16117, - "##nr": 16118, - "##ressed": 16119, - "customary": 16120, - "flashes": 16121, - "gala": 16122, - "cyrus": 16123, - "glaring": 16124, - "backyard": 16125, - "ariel": 16126, - "physiology": 16127, - "greenland": 16128, - "html": 16129, - "stir": 16130, - "avon": 16131, - "atletico": 16132, - "finch": 16133, - "methodology": 16134, - "ked": 16135, - "##lent": 16136, - "mas": 16137, - "catholicism": 16138, - "townsend": 16139, - "branding": 16140, - "quincy": 16141, - "fits": 16142, - "containers": 16143, - "1777": 16144, - "ashore": 16145, - "aragon": 16146, - "##19": 16147, - "forearm": 16148, - "poisoning": 16149, - "##sd": 16150, - "adopting": 16151, - "conquer": 16152, - "grinding": 16153, - "amnesty": 16154, - "keller": 16155, - "finances": 16156, - "evaluate": 16157, - "forged": 16158, - "lankan": 16159, - "instincts": 16160, - "##uto": 16161, - "guam": 16162, - "bosnian": 16163, - "photographed": 16164, - "workplace": 16165, - "desirable": 16166, - "protector": 16167, - "##dog": 16168, - "allocation": 16169, - "intently": 16170, - "encourages": 16171, - "willy": 16172, - "##sten": 16173, - "bodyguard": 16174, - "electro": 16175, - "brighter": 16176, - "##ν": 16177, - "bihar": 16178, - "##chev": 16179, - "lasts": 16180, - "opener": 16181, - "amphibious": 16182, - "sal": 16183, - "verde": 16184, - "arte": 16185, - "##cope": 16186, - "captivity": 16187, - "vocabulary": 16188, - "yields": 16189, - "##tted": 16190, - "agreeing": 16191, - "desmond": 16192, - "pioneered": 16193, - "##chus": 16194, - "strap": 16195, - "campaigned": 16196, - "railroads": 16197, - "##ович": 16198, - "emblem": 16199, - "##dre": 16200, - "stormed": 16201, - "501": 16202, - "##ulous": 16203, - "marijuana": 16204, - "northumberland": 16205, - "##gn": 16206, - "##nath": 16207, - "bowen": 16208, - "landmarks": 16209, - "beaumont": 16210, - "##qua": 16211, - "danube": 16212, - "##bler": 16213, - "attorneys": 16214, - "th": 16215, - "ge": 16216, - "flyers": 16217, - "critique": 16218, - "villains": 16219, - "cass": 16220, - "mutation": 16221, - "acc": 16222, - "##0s": 16223, - "colombo": 16224, - "mckay": 16225, - "motif": 16226, - "sampling": 16227, - "concluding": 16228, - "syndicate": 16229, - "##rell": 16230, - "neon": 16231, - "stables": 16232, - "ds": 16233, - "warnings": 16234, - "clint": 16235, - "mourning": 16236, - "wilkinson": 16237, - "##tated": 16238, - "merrill": 16239, - "leopard": 16240, - "evenings": 16241, - "exhaled": 16242, - "emil": 16243, - "sonia": 16244, - "ezra": 16245, - "discrete": 16246, - "stove": 16247, - "farrell": 16248, - "fifteenth": 16249, - "prescribed": 16250, - "superhero": 16251, - "##rier": 16252, - "worms": 16253, - "helm": 16254, - "wren": 16255, - "##duction": 16256, - "##hc": 16257, - "expo": 16258, - "##rator": 16259, - "hq": 16260, - "unfamiliar": 16261, - "antony": 16262, - "prevents": 16263, - "acceleration": 16264, - "fiercely": 16265, - "mari": 16266, - "painfully": 
16267, - "calculations": 16268, - "cheaper": 16269, - "ign": 16270, - "clifton": 16271, - "irvine": 16272, - "davenport": 16273, - "mozambique": 16274, - "##np": 16275, - "pierced": 16276, - "##evich": 16277, - "wonders": 16278, - "##wig": 16279, - "##cate": 16280, - "##iling": 16281, - "crusade": 16282, - "ware": 16283, - "##uel": 16284, - "enzymes": 16285, - "reasonably": 16286, - "mls": 16287, - "##coe": 16288, - "mater": 16289, - "ambition": 16290, - "bunny": 16291, - "eliot": 16292, - "kernel": 16293, - "##fin": 16294, - "asphalt": 16295, - "headmaster": 16296, - "torah": 16297, - "aden": 16298, - "lush": 16299, - "pins": 16300, - "waived": 16301, - "##care": 16302, - "##yas": 16303, - "joao": 16304, - "substrate": 16305, - "enforce": 16306, - "##grad": 16307, - "##ules": 16308, - "alvarez": 16309, - "selections": 16310, - "epidemic": 16311, - "tempted": 16312, - "##bit": 16313, - "bremen": 16314, - "translates": 16315, - "ensured": 16316, - "waterfront": 16317, - "29th": 16318, - "forrest": 16319, - "manny": 16320, - "malone": 16321, - "kramer": 16322, - "reigning": 16323, - "cookies": 16324, - "simpler": 16325, - "absorption": 16326, - "205": 16327, - "engraved": 16328, - "##ffy": 16329, - "evaluated": 16330, - "1778": 16331, - "haze": 16332, - "146": 16333, - "comforting": 16334, - "crossover": 16335, - "##abe": 16336, - "thorn": 16337, - "##rift": 16338, - "##imo": 16339, - "##pop": 16340, - "suppression": 16341, - "fatigue": 16342, - "cutter": 16343, - "##tr": 16344, - "201": 16345, - "wurttemberg": 16346, - "##orf": 16347, - "enforced": 16348, - "hovering": 16349, - "proprietary": 16350, - "gb": 16351, - "samurai": 16352, - "syllable": 16353, - "ascent": 16354, - "lacey": 16355, - "tick": 16356, - "lars": 16357, - "tractor": 16358, - "merchandise": 16359, - "rep": 16360, - "bouncing": 16361, - "defendants": 16362, - "##yre": 16363, - "huntington": 16364, - "##ground": 16365, - "##oko": 16366, - "standardized": 16367, - "##hor": 16368, - "##hima": 16369, - "assassinated": 16370, - "nu": 16371, - "predecessors": 16372, - "rainy": 16373, - "liar": 16374, - "assurance": 16375, - "lyrical": 16376, - "##uga": 16377, - "secondly": 16378, - "flattened": 16379, - "ios": 16380, - "parameter": 16381, - "undercover": 16382, - "##mity": 16383, - "bordeaux": 16384, - "punish": 16385, - "ridges": 16386, - "markers": 16387, - "exodus": 16388, - "inactive": 16389, - "hesitate": 16390, - "debbie": 16391, - "nyc": 16392, - "pledge": 16393, - "savoy": 16394, - "nagar": 16395, - "offset": 16396, - "organist": 16397, - "##tium": 16398, - "hesse": 16399, - "marin": 16400, - "converting": 16401, - "##iver": 16402, - "diagram": 16403, - "propulsion": 16404, - "pu": 16405, - "validity": 16406, - "reverted": 16407, - "supportive": 16408, - "##dc": 16409, - "ministries": 16410, - "clans": 16411, - "responds": 16412, - "proclamation": 16413, - "##inae": 16414, - "##ø": 16415, - "##rea": 16416, - "ein": 16417, - "pleading": 16418, - "patriot": 16419, - "sf": 16420, - "birch": 16421, - "islanders": 16422, - "strauss": 16423, - "hates": 16424, - "##dh": 16425, - "brandenburg": 16426, - "concession": 16427, - "rd": 16428, - "##ob": 16429, - "1900s": 16430, - "killings": 16431, - "textbook": 16432, - "antiquity": 16433, - "cinematography": 16434, - "wharf": 16435, - "embarrassing": 16436, - "setup": 16437, - "creed": 16438, - "farmland": 16439, - "inequality": 16440, - "centred": 16441, - "signatures": 16442, - "fallon": 16443, - "370": 16444, - "##ingham": 16445, - "##uts": 16446, - "ceylon": 16447, - "gazing": 
16448, - "directive": 16449, - "laurie": 16450, - "##tern": 16451, - "globally": 16452, - "##uated": 16453, - "##dent": 16454, - "allah": 16455, - "excavation": 16456, - "threads": 16457, - "##cross": 16458, - "148": 16459, - "frantically": 16460, - "icc": 16461, - "utilize": 16462, - "determines": 16463, - "respiratory": 16464, - "thoughtful": 16465, - "receptions": 16466, - "##dicate": 16467, - "merging": 16468, - "chandra": 16469, - "seine": 16470, - "147": 16471, - "builders": 16472, - "builds": 16473, - "diagnostic": 16474, - "dev": 16475, - "visibility": 16476, - "goddamn": 16477, - "analyses": 16478, - "dhaka": 16479, - "cho": 16480, - "proves": 16481, - "chancel": 16482, - "concurrent": 16483, - "curiously": 16484, - "canadians": 16485, - "pumped": 16486, - "restoring": 16487, - "1850s": 16488, - "turtles": 16489, - "jaguar": 16490, - "sinister": 16491, - "spinal": 16492, - "traction": 16493, - "declan": 16494, - "vows": 16495, - "1784": 16496, - "glowed": 16497, - "capitalism": 16498, - "swirling": 16499, - "install": 16500, - "universidad": 16501, - "##lder": 16502, - "##oat": 16503, - "soloist": 16504, - "##genic": 16505, - "##oor": 16506, - "coincidence": 16507, - "beginnings": 16508, - "nissan": 16509, - "dip": 16510, - "resorts": 16511, - "caucasus": 16512, - "combustion": 16513, - "infectious": 16514, - "##eno": 16515, - "pigeon": 16516, - "serpent": 16517, - "##itating": 16518, - "conclude": 16519, - "masked": 16520, - "salad": 16521, - "jew": 16522, - "##gr": 16523, - "surreal": 16524, - "toni": 16525, - "##wc": 16526, - "harmonica": 16527, - "151": 16528, - "##gins": 16529, - "##etic": 16530, - "##coat": 16531, - "fishermen": 16532, - "intending": 16533, - "bravery": 16534, - "##wave": 16535, - "klaus": 16536, - "titan": 16537, - "wembley": 16538, - "taiwanese": 16539, - "ransom": 16540, - "40th": 16541, - "incorrect": 16542, - "hussein": 16543, - "eyelids": 16544, - "jp": 16545, - "cooke": 16546, - "dramas": 16547, - "utilities": 16548, - "##etta": 16549, - "##print": 16550, - "eisenhower": 16551, - "principally": 16552, - "granada": 16553, - "lana": 16554, - "##rak": 16555, - "openings": 16556, - "concord": 16557, - "##bl": 16558, - "bethany": 16559, - "connie": 16560, - "morality": 16561, - "sega": 16562, - "##mons": 16563, - "##nard": 16564, - "earnings": 16565, - "##kara": 16566, - "##cine": 16567, - "wii": 16568, - "communes": 16569, - "##rel": 16570, - "coma": 16571, - "composing": 16572, - "softened": 16573, - "severed": 16574, - "grapes": 16575, - "##17": 16576, - "nguyen": 16577, - "analyzed": 16578, - "warlord": 16579, - "hubbard": 16580, - "heavenly": 16581, - "behave": 16582, - "slovenian": 16583, - "##hit": 16584, - "##ony": 16585, - "hailed": 16586, - "filmmakers": 16587, - "trance": 16588, - "caldwell": 16589, - "skye": 16590, - "unrest": 16591, - "coward": 16592, - "likelihood": 16593, - "##aging": 16594, - "bern": 16595, - "sci": 16596, - "taliban": 16597, - "honolulu": 16598, - "propose": 16599, - "##wang": 16600, - "1700": 16601, - "browser": 16602, - "imagining": 16603, - "cobra": 16604, - "contributes": 16605, - "dukes": 16606, - "instinctively": 16607, - "conan": 16608, - "violinist": 16609, - "##ores": 16610, - "accessories": 16611, - "gradual": 16612, - "##amp": 16613, - "quotes": 16614, - "sioux": 16615, - "##dating": 16616, - "undertake": 16617, - "intercepted": 16618, - "sparkling": 16619, - "compressed": 16620, - "139": 16621, - "fungus": 16622, - "tombs": 16623, - "haley": 16624, - "imposing": 16625, - "rests": 16626, - "degradation": 16627, 
- "lincolnshire": 16628, - "retailers": 16629, - "wetlands": 16630, - "tulsa": 16631, - "distributor": 16632, - "dungeon": 16633, - "nun": 16634, - "greenhouse": 16635, - "convey": 16636, - "atlantis": 16637, - "aft": 16638, - "exits": 16639, - "oman": 16640, - "dresser": 16641, - "lyons": 16642, - "##sti": 16643, - "joking": 16644, - "eddy": 16645, - "judgement": 16646, - "omitted": 16647, - "digits": 16648, - "##cts": 16649, - "##game": 16650, - "juniors": 16651, - "##rae": 16652, - "cents": 16653, - "stricken": 16654, - "une": 16655, - "##ngo": 16656, - "wizards": 16657, - "weir": 16658, - "breton": 16659, - "nan": 16660, - "technician": 16661, - "fibers": 16662, - "liking": 16663, - "royalty": 16664, - "##cca": 16665, - "154": 16666, - "persia": 16667, - "terribly": 16668, - "magician": 16669, - "##rable": 16670, - "##unt": 16671, - "vance": 16672, - "cafeteria": 16673, - "booker": 16674, - "camille": 16675, - "warmer": 16676, - "##static": 16677, - "consume": 16678, - "cavern": 16679, - "gaps": 16680, - "compass": 16681, - "contemporaries": 16682, - "foyer": 16683, - "soothing": 16684, - "graveyard": 16685, - "maj": 16686, - "plunged": 16687, - "blush": 16688, - "##wear": 16689, - "cascade": 16690, - "demonstrates": 16691, - "ordinance": 16692, - "##nov": 16693, - "boyle": 16694, - "##lana": 16695, - "rockefeller": 16696, - "shaken": 16697, - "banjo": 16698, - "izzy": 16699, - "##ense": 16700, - "breathless": 16701, - "vines": 16702, - "##32": 16703, - "##eman": 16704, - "alterations": 16705, - "chromosome": 16706, - "dwellings": 16707, - "feudal": 16708, - "mole": 16709, - "153": 16710, - "catalonia": 16711, - "relics": 16712, - "tenant": 16713, - "mandated": 16714, - "##fm": 16715, - "fridge": 16716, - "hats": 16717, - "honesty": 16718, - "patented": 16719, - "raul": 16720, - "heap": 16721, - "cruisers": 16722, - "accusing": 16723, - "enlightenment": 16724, - "infants": 16725, - "wherein": 16726, - "chatham": 16727, - "contractors": 16728, - "zen": 16729, - "affinity": 16730, - "hc": 16731, - "osborne": 16732, - "piston": 16733, - "156": 16734, - "traps": 16735, - "maturity": 16736, - "##rana": 16737, - "lagos": 16738, - "##zal": 16739, - "peering": 16740, - "##nay": 16741, - "attendant": 16742, - "dealers": 16743, - "protocols": 16744, - "subset": 16745, - "prospects": 16746, - "biographical": 16747, - "##cre": 16748, - "artery": 16749, - "##zers": 16750, - "insignia": 16751, - "nuns": 16752, - "endured": 16753, - "##eration": 16754, - "recommend": 16755, - "schwartz": 16756, - "serbs": 16757, - "berger": 16758, - "cromwell": 16759, - "crossroads": 16760, - "##ctor": 16761, - "enduring": 16762, - "clasped": 16763, - "grounded": 16764, - "##bine": 16765, - "marseille": 16766, - "twitched": 16767, - "abel": 16768, - "choke": 16769, - "https": 16770, - "catalyst": 16771, - "moldova": 16772, - "italians": 16773, - "##tist": 16774, - "disastrous": 16775, - "wee": 16776, - "##oured": 16777, - "##nti": 16778, - "wwf": 16779, - "nope": 16780, - "##piration": 16781, - "##asa": 16782, - "expresses": 16783, - "thumbs": 16784, - "167": 16785, - "##nza": 16786, - "coca": 16787, - "1781": 16788, - "cheating": 16789, - "##ption": 16790, - "skipped": 16791, - "sensory": 16792, - "heidelberg": 16793, - "spies": 16794, - "satan": 16795, - "dangers": 16796, - "semifinal": 16797, - "202": 16798, - "bohemia": 16799, - "whitish": 16800, - "confusing": 16801, - "shipbuilding": 16802, - "relies": 16803, - "surgeons": 16804, - "landings": 16805, - "ravi": 16806, - "baku": 16807, - "moor": 16808, - 
"suffix": 16809, - "alejandro": 16810, - "##yana": 16811, - "litre": 16812, - "upheld": 16813, - "##unk": 16814, - "rajasthan": 16815, - "##rek": 16816, - "coaster": 16817, - "insists": 16818, - "posture": 16819, - "scenarios": 16820, - "etienne": 16821, - "favoured": 16822, - "appoint": 16823, - "transgender": 16824, - "elephants": 16825, - "poked": 16826, - "greenwood": 16827, - "defences": 16828, - "fulfilled": 16829, - "militant": 16830, - "somali": 16831, - "1758": 16832, - "chalk": 16833, - "potent": 16834, - "##ucci": 16835, - "migrants": 16836, - "wink": 16837, - "assistants": 16838, - "nos": 16839, - "restriction": 16840, - "activism": 16841, - "niger": 16842, - "##ario": 16843, - "colon": 16844, - "shaun": 16845, - "##sat": 16846, - "daphne": 16847, - "##erated": 16848, - "swam": 16849, - "congregations": 16850, - "reprise": 16851, - "considerations": 16852, - "magnet": 16853, - "playable": 16854, - "xvi": 16855, - "##р": 16856, - "overthrow": 16857, - "tobias": 16858, - "knob": 16859, - "chavez": 16860, - "coding": 16861, - "##mers": 16862, - "propped": 16863, - "katrina": 16864, - "orient": 16865, - "newcomer": 16866, - "##suke": 16867, - "temperate": 16868, - "##pool": 16869, - "farmhouse": 16870, - "interrogation": 16871, - "##vd": 16872, - "committing": 16873, - "##vert": 16874, - "forthcoming": 16875, - "strawberry": 16876, - "joaquin": 16877, - "macau": 16878, - "ponds": 16879, - "shocking": 16880, - "siberia": 16881, - "##cellular": 16882, - "chant": 16883, - "contributors": 16884, - "##nant": 16885, - "##ologists": 16886, - "sped": 16887, - "absorb": 16888, - "hail": 16889, - "1782": 16890, - "spared": 16891, - "##hore": 16892, - "barbados": 16893, - "karate": 16894, - "opus": 16895, - "originates": 16896, - "saul": 16897, - "##xie": 16898, - "evergreen": 16899, - "leaped": 16900, - "##rock": 16901, - "correlation": 16902, - "exaggerated": 16903, - "weekday": 16904, - "unification": 16905, - "bump": 16906, - "tracing": 16907, - "brig": 16908, - "afb": 16909, - "pathways": 16910, - "utilizing": 16911, - "##ners": 16912, - "mod": 16913, - "mb": 16914, - "disturbance": 16915, - "kneeling": 16916, - "##stad": 16917, - "##guchi": 16918, - "100th": 16919, - "pune": 16920, - "##thy": 16921, - "decreasing": 16922, - "168": 16923, - "manipulation": 16924, - "miriam": 16925, - "academia": 16926, - "ecosystem": 16927, - "occupational": 16928, - "rbi": 16929, - "##lem": 16930, - "rift": 16931, - "##14": 16932, - "rotary": 16933, - "stacked": 16934, - "incorporation": 16935, - "awakening": 16936, - "generators": 16937, - "guerrero": 16938, - "racist": 16939, - "##omy": 16940, - "cyber": 16941, - "derivatives": 16942, - "culminated": 16943, - "allie": 16944, - "annals": 16945, - "panzer": 16946, - "sainte": 16947, - "wikipedia": 16948, - "pops": 16949, - "zu": 16950, - "austro": 16951, - "##vate": 16952, - "algerian": 16953, - "politely": 16954, - "nicholson": 16955, - "mornings": 16956, - "educate": 16957, - "tastes": 16958, - "thrill": 16959, - "dartmouth": 16960, - "##gating": 16961, - "db": 16962, - "##jee": 16963, - "regan": 16964, - "differing": 16965, - "concentrating": 16966, - "choreography": 16967, - "divinity": 16968, - "##media": 16969, - "pledged": 16970, - "alexandre": 16971, - "routing": 16972, - "gregor": 16973, - "madeline": 16974, - "##idal": 16975, - "apocalypse": 16976, - "##hora": 16977, - "gunfire": 16978, - "culminating": 16979, - "elves": 16980, - "fined": 16981, - "liang": 16982, - "lam": 16983, - "programmed": 16984, - "tar": 16985, - "guessing": 16986, - 
"transparency": 16987, - "gabrielle": 16988, - "##gna": 16989, - "cancellation": 16990, - "flexibility": 16991, - "##lining": 16992, - "accession": 16993, - "shea": 16994, - "stronghold": 16995, - "nets": 16996, - "specializes": 16997, - "##rgan": 16998, - "abused": 16999, - "hasan": 17000, - "sgt": 17001, - "ling": 17002, - "exceeding": 17003, - "##₄": 17004, - "admiration": 17005, - "supermarket": 17006, - "##ark": 17007, - "photographers": 17008, - "specialised": 17009, - "tilt": 17010, - "resonance": 17011, - "hmm": 17012, - "perfume": 17013, - "380": 17014, - "sami": 17015, - "threatens": 17016, - "garland": 17017, - "botany": 17018, - "guarding": 17019, - "boiled": 17020, - "greet": 17021, - "puppy": 17022, - "russo": 17023, - "supplier": 17024, - "wilmington": 17025, - "vibrant": 17026, - "vijay": 17027, - "##bius": 17028, - "paralympic": 17029, - "grumbled": 17030, - "paige": 17031, - "faa": 17032, - "licking": 17033, - "margins": 17034, - "hurricanes": 17035, - "##gong": 17036, - "fest": 17037, - "grenade": 17038, - "ripping": 17039, - "##uz": 17040, - "counseling": 17041, - "weigh": 17042, - "##sian": 17043, - "needles": 17044, - "wiltshire": 17045, - "edison": 17046, - "costly": 17047, - "##not": 17048, - "fulton": 17049, - "tramway": 17050, - "redesigned": 17051, - "staffordshire": 17052, - "cache": 17053, - "gasping": 17054, - "watkins": 17055, - "sleepy": 17056, - "candidacy": 17057, - "##group": 17058, - "monkeys": 17059, - "timeline": 17060, - "throbbing": 17061, - "##bid": 17062, - "##sos": 17063, - "berth": 17064, - "uzbekistan": 17065, - "vanderbilt": 17066, - "bothering": 17067, - "overturned": 17068, - "ballots": 17069, - "gem": 17070, - "##iger": 17071, - "sunglasses": 17072, - "subscribers": 17073, - "hooker": 17074, - "compelling": 17075, - "ang": 17076, - "exceptionally": 17077, - "saloon": 17078, - "stab": 17079, - "##rdi": 17080, - "carla": 17081, - "terrifying": 17082, - "rom": 17083, - "##vision": 17084, - "coil": 17085, - "##oids": 17086, - "satisfying": 17087, - "vendors": 17088, - "31st": 17089, - "mackay": 17090, - "deities": 17091, - "overlooked": 17092, - "ambient": 17093, - "bahamas": 17094, - "felipe": 17095, - "olympia": 17096, - "whirled": 17097, - "botanist": 17098, - "advertised": 17099, - "tugging": 17100, - "##dden": 17101, - "disciples": 17102, - "morales": 17103, - "unionist": 17104, - "rites": 17105, - "foley": 17106, - "morse": 17107, - "motives": 17108, - "creepy": 17109, - "##₀": 17110, - "soo": 17111, - "##sz": 17112, - "bargain": 17113, - "highness": 17114, - "frightening": 17115, - "turnpike": 17116, - "tory": 17117, - "reorganization": 17118, - "##cer": 17119, - "depict": 17120, - "biographer": 17121, - "##walk": 17122, - "unopposed": 17123, - "manifesto": 17124, - "##gles": 17125, - "institut": 17126, - "emile": 17127, - "accidental": 17128, - "kapoor": 17129, - "##dam": 17130, - "kilkenny": 17131, - "cortex": 17132, - "lively": 17133, - "##13": 17134, - "romanesque": 17135, - "jain": 17136, - "shan": 17137, - "cannons": 17138, - "##ood": 17139, - "##ske": 17140, - "petrol": 17141, - "echoing": 17142, - "amalgamated": 17143, - "disappears": 17144, - "cautious": 17145, - "proposes": 17146, - "sanctions": 17147, - "trenton": 17148, - "##ر": 17149, - "flotilla": 17150, - "aus": 17151, - "contempt": 17152, - "tor": 17153, - "canary": 17154, - "cote": 17155, - "theirs": 17156, - "##hun": 17157, - "conceptual": 17158, - "deleted": 17159, - "fascinating": 17160, - "paso": 17161, - "blazing": 17162, - "elf": 17163, - "honourable": 17164, - 
"hutchinson": 17165, - "##eiro": 17166, - "##outh": 17167, - "##zin": 17168, - "surveyor": 17169, - "tee": 17170, - "amidst": 17171, - "wooded": 17172, - "reissue": 17173, - "intro": 17174, - "##ono": 17175, - "cobb": 17176, - "shelters": 17177, - "newsletter": 17178, - "hanson": 17179, - "brace": 17180, - "encoding": 17181, - "confiscated": 17182, - "dem": 17183, - "caravan": 17184, - "marino": 17185, - "scroll": 17186, - "melodic": 17187, - "cows": 17188, - "imam": 17189, - "##adi": 17190, - "##aneous": 17191, - "northward": 17192, - "searches": 17193, - "biodiversity": 17194, - "cora": 17195, - "310": 17196, - "roaring": 17197, - "##bers": 17198, - "connell": 17199, - "theologian": 17200, - "halo": 17201, - "compose": 17202, - "pathetic": 17203, - "unmarried": 17204, - "dynamo": 17205, - "##oot": 17206, - "az": 17207, - "calculation": 17208, - "toulouse": 17209, - "deserves": 17210, - "humour": 17211, - "nr": 17212, - "forgiveness": 17213, - "tam": 17214, - "undergone": 17215, - "martyr": 17216, - "pamela": 17217, - "myths": 17218, - "whore": 17219, - "counselor": 17220, - "hicks": 17221, - "290": 17222, - "heavens": 17223, - "battleship": 17224, - "electromagnetic": 17225, - "##bbs": 17226, - "stellar": 17227, - "establishments": 17228, - "presley": 17229, - "hopped": 17230, - "##chin": 17231, - "temptation": 17232, - "90s": 17233, - "wills": 17234, - "nas": 17235, - "##yuan": 17236, - "nhs": 17237, - "##nya": 17238, - "seminars": 17239, - "##yev": 17240, - "adaptations": 17241, - "gong": 17242, - "asher": 17243, - "lex": 17244, - "indicator": 17245, - "sikh": 17246, - "tobago": 17247, - "cites": 17248, - "goin": 17249, - "##yte": 17250, - "satirical": 17251, - "##gies": 17252, - "characterised": 17253, - "correspond": 17254, - "bubbles": 17255, - "lure": 17256, - "participates": 17257, - "##vid": 17258, - "eruption": 17259, - "skate": 17260, - "therapeutic": 17261, - "1785": 17262, - "canals": 17263, - "wholesale": 17264, - "defaulted": 17265, - "sac": 17266, - "460": 17267, - "petit": 17268, - "##zzled": 17269, - "virgil": 17270, - "leak": 17271, - "ravens": 17272, - "256": 17273, - "portraying": 17274, - "##yx": 17275, - "ghetto": 17276, - "creators": 17277, - "dams": 17278, - "portray": 17279, - "vicente": 17280, - "##rington": 17281, - "fae": 17282, - "namesake": 17283, - "bounty": 17284, - "##arium": 17285, - "joachim": 17286, - "##ota": 17287, - "##iser": 17288, - "aforementioned": 17289, - "axle": 17290, - "snout": 17291, - "depended": 17292, - "dismantled": 17293, - "reuben": 17294, - "480": 17295, - "##ibly": 17296, - "gallagher": 17297, - "##lau": 17298, - "##pd": 17299, - "earnest": 17300, - "##ieu": 17301, - "##iary": 17302, - "inflicted": 17303, - "objections": 17304, - "##llar": 17305, - "asa": 17306, - "gritted": 17307, - "##athy": 17308, - "jericho": 17309, - "##sea": 17310, - "##was": 17311, - "flick": 17312, - "underside": 17313, - "ceramics": 17314, - "undead": 17315, - "substituted": 17316, - "195": 17317, - "eastward": 17318, - "undoubtedly": 17319, - "wheeled": 17320, - "chimney": 17321, - "##iche": 17322, - "guinness": 17323, - "cb": 17324, - "##ager": 17325, - "siding": 17326, - "##bell": 17327, - "traitor": 17328, - "baptiste": 17329, - "disguised": 17330, - "inauguration": 17331, - "149": 17332, - "tipperary": 17333, - "choreographer": 17334, - "perched": 17335, - "warmed": 17336, - "stationary": 17337, - "eco": 17338, - "##ike": 17339, - "##ntes": 17340, - "bacterial": 17341, - "##aurus": 17342, - "flores": 17343, - "phosphate": 17344, - "##core": 17345, - 
"attacker": 17346, - "invaders": 17347, - "alvin": 17348, - "intersects": 17349, - "a1": 17350, - "indirectly": 17351, - "immigrated": 17352, - "businessmen": 17353, - "cornelius": 17354, - "valves": 17355, - "narrated": 17356, - "pill": 17357, - "sober": 17358, - "ul": 17359, - "nationale": 17360, - "monastic": 17361, - "applicants": 17362, - "scenery": 17363, - "##jack": 17364, - "161": 17365, - "motifs": 17366, - "constitutes": 17367, - "cpu": 17368, - "##osh": 17369, - "jurisdictions": 17370, - "sd": 17371, - "tuning": 17372, - "irritation": 17373, - "woven": 17374, - "##uddin": 17375, - "fertility": 17376, - "gao": 17377, - "##erie": 17378, - "antagonist": 17379, - "impatient": 17380, - "glacial": 17381, - "hides": 17382, - "boarded": 17383, - "denominations": 17384, - "interception": 17385, - "##jas": 17386, - "cookie": 17387, - "nicola": 17388, - "##tee": 17389, - "algebraic": 17390, - "marquess": 17391, - "bahn": 17392, - "parole": 17393, - "buyers": 17394, - "bait": 17395, - "turbines": 17396, - "paperwork": 17397, - "bestowed": 17398, - "natasha": 17399, - "renee": 17400, - "oceans": 17401, - "purchases": 17402, - "157": 17403, - "vaccine": 17404, - "215": 17405, - "##tock": 17406, - "fixtures": 17407, - "playhouse": 17408, - "integrate": 17409, - "jai": 17410, - "oswald": 17411, - "intellectuals": 17412, - "##cky": 17413, - "booked": 17414, - "nests": 17415, - "mortimer": 17416, - "##isi": 17417, - "obsession": 17418, - "sept": 17419, - "##gler": 17420, - "##sum": 17421, - "440": 17422, - "scrutiny": 17423, - "simultaneous": 17424, - "squinted": 17425, - "##shin": 17426, - "collects": 17427, - "oven": 17428, - "shankar": 17429, - "penned": 17430, - "remarkably": 17431, - "##я": 17432, - "slips": 17433, - "luggage": 17434, - "spectral": 17435, - "1786": 17436, - "collaborations": 17437, - "louie": 17438, - "consolidation": 17439, - "##ailed": 17440, - "##ivating": 17441, - "420": 17442, - "hoover": 17443, - "blackpool": 17444, - "harness": 17445, - "ignition": 17446, - "vest": 17447, - "tails": 17448, - "belmont": 17449, - "mongol": 17450, - "skinner": 17451, - "##nae": 17452, - "visually": 17453, - "mage": 17454, - "derry": 17455, - "##tism": 17456, - "##unce": 17457, - "stevie": 17458, - "transitional": 17459, - "##rdy": 17460, - "redskins": 17461, - "drying": 17462, - "prep": 17463, - "prospective": 17464, - "##21": 17465, - "annoyance": 17466, - "oversee": 17467, - "##loaded": 17468, - "fills": 17469, - "##books": 17470, - "##iki": 17471, - "announces": 17472, - "fda": 17473, - "scowled": 17474, - "respects": 17475, - "prasad": 17476, - "mystic": 17477, - "tucson": 17478, - "##vale": 17479, - "revue": 17480, - "springer": 17481, - "bankrupt": 17482, - "1772": 17483, - "aristotle": 17484, - "salvatore": 17485, - "habsburg": 17486, - "##geny": 17487, - "dal": 17488, - "natal": 17489, - "nut": 17490, - "pod": 17491, - "chewing": 17492, - "darts": 17493, - "moroccan": 17494, - "walkover": 17495, - "rosario": 17496, - "lenin": 17497, - "punjabi": 17498, - "##ße": 17499, - "grossed": 17500, - "scattering": 17501, - "wired": 17502, - "invasive": 17503, - "hui": 17504, - "polynomial": 17505, - "corridors": 17506, - "wakes": 17507, - "gina": 17508, - "portrays": 17509, - "##cratic": 17510, - "arid": 17511, - "retreating": 17512, - "erich": 17513, - "irwin": 17514, - "sniper": 17515, - "##dha": 17516, - "linen": 17517, - "lindsey": 17518, - "maneuver": 17519, - "butch": 17520, - "shutting": 17521, - "socio": 17522, - "bounce": 17523, - "commemorative": 17524, - "postseason": 17525, - 
"jeremiah": 17526, - "pines": 17527, - "275": 17528, - "mystical": 17529, - "beads": 17530, - "bp": 17531, - "abbas": 17532, - "furnace": 17533, - "bidding": 17534, - "consulted": 17535, - "assaulted": 17536, - "empirical": 17537, - "rubble": 17538, - "enclosure": 17539, - "sob": 17540, - "weakly": 17541, - "cancel": 17542, - "polly": 17543, - "yielded": 17544, - "##emann": 17545, - "curly": 17546, - "prediction": 17547, - "battered": 17548, - "70s": 17549, - "vhs": 17550, - "jacqueline": 17551, - "render": 17552, - "sails": 17553, - "barked": 17554, - "detailing": 17555, - "grayson": 17556, - "riga": 17557, - "sloane": 17558, - "raging": 17559, - "##yah": 17560, - "herbs": 17561, - "bravo": 17562, - "##athlon": 17563, - "alloy": 17564, - "giggle": 17565, - "imminent": 17566, - "suffers": 17567, - "assumptions": 17568, - "waltz": 17569, - "##itate": 17570, - "accomplishments": 17571, - "##ited": 17572, - "bathing": 17573, - "remixed": 17574, - "deception": 17575, - "prefix": 17576, - "##emia": 17577, - "deepest": 17578, - "##tier": 17579, - "##eis": 17580, - "balkan": 17581, - "frogs": 17582, - "##rong": 17583, - "slab": 17584, - "##pate": 17585, - "philosophers": 17586, - "peterborough": 17587, - "grains": 17588, - "imports": 17589, - "dickinson": 17590, - "rwanda": 17591, - "##atics": 17592, - "1774": 17593, - "dirk": 17594, - "lan": 17595, - "tablets": 17596, - "##rove": 17597, - "clone": 17598, - "##rice": 17599, - "caretaker": 17600, - "hostilities": 17601, - "mclean": 17602, - "##gre": 17603, - "regimental": 17604, - "treasures": 17605, - "norms": 17606, - "impose": 17607, - "tsar": 17608, - "tango": 17609, - "diplomacy": 17610, - "variously": 17611, - "complain": 17612, - "192": 17613, - "recognise": 17614, - "arrests": 17615, - "1779": 17616, - "celestial": 17617, - "pulitzer": 17618, - "##dus": 17619, - "bing": 17620, - "libretto": 17621, - "##moor": 17622, - "adele": 17623, - "splash": 17624, - "##rite": 17625, - "expectation": 17626, - "lds": 17627, - "confronts": 17628, - "##izer": 17629, - "spontaneous": 17630, - "harmful": 17631, - "wedge": 17632, - "entrepreneurs": 17633, - "buyer": 17634, - "##ope": 17635, - "bilingual": 17636, - "translate": 17637, - "rugged": 17638, - "conner": 17639, - "circulated": 17640, - "uae": 17641, - "eaton": 17642, - "##gra": 17643, - "##zzle": 17644, - "lingered": 17645, - "lockheed": 17646, - "vishnu": 17647, - "reelection": 17648, - "alonso": 17649, - "##oom": 17650, - "joints": 17651, - "yankee": 17652, - "headline": 17653, - "cooperate": 17654, - "heinz": 17655, - "laureate": 17656, - "invading": 17657, - "##sford": 17658, - "echoes": 17659, - "scandinavian": 17660, - "##dham": 17661, - "hugging": 17662, - "vitamin": 17663, - "salute": 17664, - "micah": 17665, - "hind": 17666, - "trader": 17667, - "##sper": 17668, - "radioactive": 17669, - "##ndra": 17670, - "militants": 17671, - "poisoned": 17672, - "ratified": 17673, - "remark": 17674, - "campeonato": 17675, - "deprived": 17676, - "wander": 17677, - "prop": 17678, - "##dong": 17679, - "outlook": 17680, - "##tani": 17681, - "##rix": 17682, - "##eye": 17683, - "chiang": 17684, - "darcy": 17685, - "##oping": 17686, - "mandolin": 17687, - "spice": 17688, - "statesman": 17689, - "babylon": 17690, - "182": 17691, - "walled": 17692, - "forgetting": 17693, - "afro": 17694, - "##cap": 17695, - "158": 17696, - "giorgio": 17697, - "buffer": 17698, - "##polis": 17699, - "planetary": 17700, - "##gis": 17701, - "overlap": 17702, - "terminals": 17703, - "kinda": 17704, - "centenary": 17705, - "##bir": 
17706, - "arising": 17707, - "manipulate": 17708, - "elm": 17709, - "ke": 17710, - "1770": 17711, - "ak": 17712, - "##tad": 17713, - "chrysler": 17714, - "mapped": 17715, - "moose": 17716, - "pomeranian": 17717, - "quad": 17718, - "macarthur": 17719, - "assemblies": 17720, - "shoreline": 17721, - "recalls": 17722, - "stratford": 17723, - "##rted": 17724, - "noticeable": 17725, - "##evic": 17726, - "imp": 17727, - "##rita": 17728, - "##sque": 17729, - "accustomed": 17730, - "supplying": 17731, - "tents": 17732, - "disgusted": 17733, - "vogue": 17734, - "sipped": 17735, - "filters": 17736, - "khz": 17737, - "reno": 17738, - "selecting": 17739, - "luftwaffe": 17740, - "mcmahon": 17741, - "tyne": 17742, - "masterpiece": 17743, - "carriages": 17744, - "collided": 17745, - "dunes": 17746, - "exercised": 17747, - "flare": 17748, - "remembers": 17749, - "muzzle": 17750, - "##mobile": 17751, - "heck": 17752, - "##rson": 17753, - "burgess": 17754, - "lunged": 17755, - "middleton": 17756, - "boycott": 17757, - "bilateral": 17758, - "##sity": 17759, - "hazardous": 17760, - "lumpur": 17761, - "multiplayer": 17762, - "spotlight": 17763, - "jackets": 17764, - "goldman": 17765, - "liege": 17766, - "porcelain": 17767, - "rag": 17768, - "waterford": 17769, - "benz": 17770, - "attracts": 17771, - "hopeful": 17772, - "battling": 17773, - "ottomans": 17774, - "kensington": 17775, - "baked": 17776, - "hymns": 17777, - "cheyenne": 17778, - "lattice": 17779, - "levine": 17780, - "borrow": 17781, - "polymer": 17782, - "clashes": 17783, - "michaels": 17784, - "monitored": 17785, - "commitments": 17786, - "denounced": 17787, - "##25": 17788, - "##von": 17789, - "cavity": 17790, - "##oney": 17791, - "hobby": 17792, - "akin": 17793, - "##holders": 17794, - "futures": 17795, - "intricate": 17796, - "cornish": 17797, - "patty": 17798, - "##oned": 17799, - "illegally": 17800, - "dolphin": 17801, - "##lag": 17802, - "barlow": 17803, - "yellowish": 17804, - "maddie": 17805, - "apologized": 17806, - "luton": 17807, - "plagued": 17808, - "##puram": 17809, - "nana": 17810, - "##rds": 17811, - "sway": 17812, - "fanny": 17813, - "łodz": 17814, - "##rino": 17815, - "psi": 17816, - "suspicions": 17817, - "hanged": 17818, - "##eding": 17819, - "initiate": 17820, - "charlton": 17821, - "##por": 17822, - "nak": 17823, - "competent": 17824, - "235": 17825, - "analytical": 17826, - "annex": 17827, - "wardrobe": 17828, - "reservations": 17829, - "##rma": 17830, - "sect": 17831, - "162": 17832, - "fairfax": 17833, - "hedge": 17834, - "piled": 17835, - "buckingham": 17836, - "uneven": 17837, - "bauer": 17838, - "simplicity": 17839, - "snyder": 17840, - "interpret": 17841, - "accountability": 17842, - "donors": 17843, - "moderately": 17844, - "byrd": 17845, - "continents": 17846, - "##cite": 17847, - "##max": 17848, - "disciple": 17849, - "hr": 17850, - "jamaican": 17851, - "ping": 17852, - "nominees": 17853, - "##uss": 17854, - "mongolian": 17855, - "diver": 17856, - "attackers": 17857, - "eagerly": 17858, - "ideological": 17859, - "pillows": 17860, - "miracles": 17861, - "apartheid": 17862, - "revolver": 17863, - "sulfur": 17864, - "clinics": 17865, - "moran": 17866, - "163": 17867, - "##enko": 17868, - "ile": 17869, - "katy": 17870, - "rhetoric": 17871, - "##icated": 17872, - "chronology": 17873, - "recycling": 17874, - "##hrer": 17875, - "elongated": 17876, - "mughal": 17877, - "pascal": 17878, - "profiles": 17879, - "vibration": 17880, - "databases": 17881, - "domination": 17882, - "##fare": 17883, - "##rant": 17884, - "matthias": 
17885, - "digest": 17886, - "rehearsal": 17887, - "polling": 17888, - "weiss": 17889, - "initiation": 17890, - "reeves": 17891, - "clinging": 17892, - "flourished": 17893, - "impress": 17894, - "ngo": 17895, - "##hoff": 17896, - "##ume": 17897, - "buckley": 17898, - "symposium": 17899, - "rhythms": 17900, - "weed": 17901, - "emphasize": 17902, - "transforming": 17903, - "##taking": 17904, - "##gence": 17905, - "##yman": 17906, - "accountant": 17907, - "analyze": 17908, - "flicker": 17909, - "foil": 17910, - "priesthood": 17911, - "voluntarily": 17912, - "decreases": 17913, - "##80": 17914, - "##hya": 17915, - "slater": 17916, - "sv": 17917, - "charting": 17918, - "mcgill": 17919, - "##lde": 17920, - "moreno": 17921, - "##iu": 17922, - "besieged": 17923, - "zur": 17924, - "robes": 17925, - "##phic": 17926, - "admitting": 17927, - "api": 17928, - "deported": 17929, - "turmoil": 17930, - "peyton": 17931, - "earthquakes": 17932, - "##ares": 17933, - "nationalists": 17934, - "beau": 17935, - "clair": 17936, - "brethren": 17937, - "interrupt": 17938, - "welch": 17939, - "curated": 17940, - "galerie": 17941, - "requesting": 17942, - "164": 17943, - "##ested": 17944, - "impending": 17945, - "steward": 17946, - "viper": 17947, - "##vina": 17948, - "complaining": 17949, - "beautifully": 17950, - "brandy": 17951, - "foam": 17952, - "nl": 17953, - "1660": 17954, - "##cake": 17955, - "alessandro": 17956, - "punches": 17957, - "laced": 17958, - "explanations": 17959, - "##lim": 17960, - "attribute": 17961, - "clit": 17962, - "reggie": 17963, - "discomfort": 17964, - "##cards": 17965, - "smoothed": 17966, - "whales": 17967, - "##cene": 17968, - "adler": 17969, - "countered": 17970, - "duffy": 17971, - "disciplinary": 17972, - "widening": 17973, - "recipe": 17974, - "reliance": 17975, - "conducts": 17976, - "goats": 17977, - "gradient": 17978, - "preaching": 17979, - "##shaw": 17980, - "matilda": 17981, - "quasi": 17982, - "striped": 17983, - "meridian": 17984, - "cannabis": 17985, - "cordoba": 17986, - "certificates": 17987, - "##agh": 17988, - "##tering": 17989, - "graffiti": 17990, - "hangs": 17991, - "pilgrims": 17992, - "repeats": 17993, - "##ych": 17994, - "revive": 17995, - "urine": 17996, - "etat": 17997, - "##hawk": 17998, - "fueled": 17999, - "belts": 18000, - "fuzzy": 18001, - "susceptible": 18002, - "##hang": 18003, - "mauritius": 18004, - "salle": 18005, - "sincere": 18006, - "beers": 18007, - "hooks": 18008, - "##cki": 18009, - "arbitration": 18010, - "entrusted": 18011, - "advise": 18012, - "sniffed": 18013, - "seminar": 18014, - "junk": 18015, - "donnell": 18016, - "processors": 18017, - "principality": 18018, - "strapped": 18019, - "celia": 18020, - "mendoza": 18021, - "everton": 18022, - "fortunes": 18023, - "prejudice": 18024, - "starving": 18025, - "reassigned": 18026, - "steamer": 18027, - "##lund": 18028, - "tuck": 18029, - "evenly": 18030, - "foreman": 18031, - "##ffen": 18032, - "dans": 18033, - "375": 18034, - "envisioned": 18035, - "slit": 18036, - "##xy": 18037, - "baseman": 18038, - "liberia": 18039, - "rosemary": 18040, - "##weed": 18041, - "electrified": 18042, - "periodically": 18043, - "potassium": 18044, - "stride": 18045, - "contexts": 18046, - "sperm": 18047, - "slade": 18048, - "mariners": 18049, - "influx": 18050, - "bianca": 18051, - "subcommittee": 18052, - "##rane": 18053, - "spilling": 18054, - "icao": 18055, - "estuary": 18056, - "##nock": 18057, - "delivers": 18058, - "iphone": 18059, - "##ulata": 18060, - "isa": 18061, - "mira": 18062, - "bohemian": 18063, - 
"dessert": 18064, - "##sbury": 18065, - "welcoming": 18066, - "proudly": 18067, - "slowing": 18068, - "##chs": 18069, - "musee": 18070, - "ascension": 18071, - "russ": 18072, - "##vian": 18073, - "waits": 18074, - "##psy": 18075, - "africans": 18076, - "exploit": 18077, - "##morphic": 18078, - "gov": 18079, - "eccentric": 18080, - "crab": 18081, - "peck": 18082, - "##ull": 18083, - "entrances": 18084, - "formidable": 18085, - "marketplace": 18086, - "groom": 18087, - "bolted": 18088, - "metabolism": 18089, - "patton": 18090, - "robbins": 18091, - "courier": 18092, - "payload": 18093, - "endure": 18094, - "##ifier": 18095, - "andes": 18096, - "refrigerator": 18097, - "##pr": 18098, - "ornate": 18099, - "##uca": 18100, - "ruthless": 18101, - "illegitimate": 18102, - "masonry": 18103, - "strasbourg": 18104, - "bikes": 18105, - "adobe": 18106, - "##³": 18107, - "apples": 18108, - "quintet": 18109, - "willingly": 18110, - "niche": 18111, - "bakery": 18112, - "corpses": 18113, - "energetic": 18114, - "##cliffe": 18115, - "##sser": 18116, - "##ards": 18117, - "177": 18118, - "centimeters": 18119, - "centro": 18120, - "fuscous": 18121, - "cretaceous": 18122, - "rancho": 18123, - "##yde": 18124, - "andrei": 18125, - "telecom": 18126, - "tottenham": 18127, - "oasis": 18128, - "ordination": 18129, - "vulnerability": 18130, - "presiding": 18131, - "corey": 18132, - "cp": 18133, - "penguins": 18134, - "sims": 18135, - "##pis": 18136, - "malawi": 18137, - "piss": 18138, - "##48": 18139, - "correction": 18140, - "##cked": 18141, - "##ffle": 18142, - "##ryn": 18143, - "countdown": 18144, - "detectives": 18145, - "psychiatrist": 18146, - "psychedelic": 18147, - "dinosaurs": 18148, - "blouse": 18149, - "##get": 18150, - "choi": 18151, - "vowed": 18152, - "##oz": 18153, - "randomly": 18154, - "##pol": 18155, - "49ers": 18156, - "scrub": 18157, - "blanche": 18158, - "bruins": 18159, - "dusseldorf": 18160, - "##using": 18161, - "unwanted": 18162, - "##ums": 18163, - "212": 18164, - "dominique": 18165, - "elevations": 18166, - "headlights": 18167, - "om": 18168, - "laguna": 18169, - "##oga": 18170, - "1750": 18171, - "famously": 18172, - "ignorance": 18173, - "shrewsbury": 18174, - "##aine": 18175, - "ajax": 18176, - "breuning": 18177, - "che": 18178, - "confederacy": 18179, - "greco": 18180, - "overhaul": 18181, - "##screen": 18182, - "paz": 18183, - "skirts": 18184, - "disagreement": 18185, - "cruelty": 18186, - "jagged": 18187, - "phoebe": 18188, - "shifter": 18189, - "hovered": 18190, - "viruses": 18191, - "##wes": 18192, - "mandy": 18193, - "##lined": 18194, - "##gc": 18195, - "landlord": 18196, - "squirrel": 18197, - "dashed": 18198, - "##ι": 18199, - "ornamental": 18200, - "gag": 18201, - "wally": 18202, - "grange": 18203, - "literal": 18204, - "spurs": 18205, - "undisclosed": 18206, - "proceeding": 18207, - "yin": 18208, - "##text": 18209, - "billie": 18210, - "orphan": 18211, - "spanned": 18212, - "humidity": 18213, - "indy": 18214, - "weighted": 18215, - "presentations": 18216, - "explosions": 18217, - "lucian": 18218, - "##tary": 18219, - "vaughn": 18220, - "hindus": 18221, - "##anga": 18222, - "##hell": 18223, - "psycho": 18224, - "171": 18225, - "daytona": 18226, - "protects": 18227, - "efficiently": 18228, - "rematch": 18229, - "sly": 18230, - "tandem": 18231, - "##oya": 18232, - "rebranded": 18233, - "impaired": 18234, - "hee": 18235, - "metropolis": 18236, - "peach": 18237, - "godfrey": 18238, - "diaspora": 18239, - "ethnicity": 18240, - "prosperous": 18241, - "gleaming": 18242, - "dar": 18243, 
- "grossing": 18244, - "playback": 18245, - "##rden": 18246, - "stripe": 18247, - "pistols": 18248, - "##tain": 18249, - "births": 18250, - "labelled": 18251, - "##cating": 18252, - "172": 18253, - "rudy": 18254, - "alba": 18255, - "##onne": 18256, - "aquarium": 18257, - "hostility": 18258, - "##gb": 18259, - "##tase": 18260, - "shudder": 18261, - "sumatra": 18262, - "hardest": 18263, - "lakers": 18264, - "consonant": 18265, - "creeping": 18266, - "demos": 18267, - "homicide": 18268, - "capsule": 18269, - "zeke": 18270, - "liberties": 18271, - "expulsion": 18272, - "pueblo": 18273, - "##comb": 18274, - "trait": 18275, - "transporting": 18276, - "##ddin": 18277, - "##neck": 18278, - "##yna": 18279, - "depart": 18280, - "gregg": 18281, - "mold": 18282, - "ledge": 18283, - "hangar": 18284, - "oldham": 18285, - "playboy": 18286, - "termination": 18287, - "analysts": 18288, - "gmbh": 18289, - "romero": 18290, - "##itic": 18291, - "insist": 18292, - "cradle": 18293, - "filthy": 18294, - "brightness": 18295, - "slash": 18296, - "shootout": 18297, - "deposed": 18298, - "bordering": 18299, - "##truct": 18300, - "isis": 18301, - "microwave": 18302, - "tumbled": 18303, - "sheltered": 18304, - "cathy": 18305, - "werewolves": 18306, - "messy": 18307, - "andersen": 18308, - "convex": 18309, - "clapped": 18310, - "clinched": 18311, - "satire": 18312, - "wasting": 18313, - "edo": 18314, - "vc": 18315, - "rufus": 18316, - "##jak": 18317, - "mont": 18318, - "##etti": 18319, - "poznan": 18320, - "##keeping": 18321, - "restructuring": 18322, - "transverse": 18323, - "##rland": 18324, - "azerbaijani": 18325, - "slovene": 18326, - "gestures": 18327, - "roommate": 18328, - "choking": 18329, - "shear": 18330, - "##quist": 18331, - "vanguard": 18332, - "oblivious": 18333, - "##hiro": 18334, - "disagreed": 18335, - "baptism": 18336, - "##lich": 18337, - "coliseum": 18338, - "##aceae": 18339, - "salvage": 18340, - "societe": 18341, - "cory": 18342, - "locke": 18343, - "relocation": 18344, - "relying": 18345, - "versailles": 18346, - "ahl": 18347, - "swelling": 18348, - "##elo": 18349, - "cheerful": 18350, - "##word": 18351, - "##edes": 18352, - "gin": 18353, - "sarajevo": 18354, - "obstacle": 18355, - "diverted": 18356, - "##nac": 18357, - "messed": 18358, - "thoroughbred": 18359, - "fluttered": 18360, - "utrecht": 18361, - "chewed": 18362, - "acquaintance": 18363, - "assassins": 18364, - "dispatch": 18365, - "mirza": 18366, - "##wart": 18367, - "nike": 18368, - "salzburg": 18369, - "swell": 18370, - "yen": 18371, - "##gee": 18372, - "idle": 18373, - "ligue": 18374, - "samson": 18375, - "##nds": 18376, - "##igh": 18377, - "playful": 18378, - "spawned": 18379, - "##cise": 18380, - "tease": 18381, - "##case": 18382, - "burgundy": 18383, - "##bot": 18384, - "stirring": 18385, - "skeptical": 18386, - "interceptions": 18387, - "marathi": 18388, - "##dies": 18389, - "bedrooms": 18390, - "aroused": 18391, - "pinch": 18392, - "##lik": 18393, - "preferences": 18394, - "tattoos": 18395, - "buster": 18396, - "digitally": 18397, - "projecting": 18398, - "rust": 18399, - "##ital": 18400, - "kitten": 18401, - "priorities": 18402, - "addison": 18403, - "pseudo": 18404, - "##guard": 18405, - "dusk": 18406, - "icons": 18407, - "sermon": 18408, - "##psis": 18409, - "##iba": 18410, - "bt": 18411, - "##lift": 18412, - "##xt": 18413, - "ju": 18414, - "truce": 18415, - "rink": 18416, - "##dah": 18417, - "##wy": 18418, - "defects": 18419, - "psychiatry": 18420, - "offences": 18421, - "calculate": 18422, - "glucose": 18423, - "##iful": 
18424, - "##rized": 18425, - "##unda": 18426, - "francaise": 18427, - "##hari": 18428, - "richest": 18429, - "warwickshire": 18430, - "carly": 18431, - "1763": 18432, - "purity": 18433, - "redemption": 18434, - "lending": 18435, - "##cious": 18436, - "muse": 18437, - "bruises": 18438, - "cerebral": 18439, - "aero": 18440, - "carving": 18441, - "##name": 18442, - "preface": 18443, - "terminology": 18444, - "invade": 18445, - "monty": 18446, - "##int": 18447, - "anarchist": 18448, - "blurred": 18449, - "##iled": 18450, - "rossi": 18451, - "treats": 18452, - "guts": 18453, - "shu": 18454, - "foothills": 18455, - "ballads": 18456, - "undertaking": 18457, - "premise": 18458, - "cecilia": 18459, - "affiliates": 18460, - "blasted": 18461, - "conditional": 18462, - "wilder": 18463, - "minors": 18464, - "drone": 18465, - "rudolph": 18466, - "buffy": 18467, - "swallowing": 18468, - "horton": 18469, - "attested": 18470, - "##hop": 18471, - "rutherford": 18472, - "howell": 18473, - "primetime": 18474, - "livery": 18475, - "penal": 18476, - "##bis": 18477, - "minimize": 18478, - "hydro": 18479, - "wrecked": 18480, - "wrought": 18481, - "palazzo": 18482, - "##gling": 18483, - "cans": 18484, - "vernacular": 18485, - "friedman": 18486, - "nobleman": 18487, - "shale": 18488, - "walnut": 18489, - "danielle": 18490, - "##ection": 18491, - "##tley": 18492, - "sears": 18493, - "##kumar": 18494, - "chords": 18495, - "lend": 18496, - "flipping": 18497, - "streamed": 18498, - "por": 18499, - "dracula": 18500, - "gallons": 18501, - "sacrifices": 18502, - "gamble": 18503, - "orphanage": 18504, - "##iman": 18505, - "mckenzie": 18506, - "##gible": 18507, - "boxers": 18508, - "daly": 18509, - "##balls": 18510, - "##ان": 18511, - "208": 18512, - "##ific": 18513, - "##rative": 18514, - "##iq": 18515, - "exploited": 18516, - "slated": 18517, - "##uity": 18518, - "circling": 18519, - "hillary": 18520, - "pinched": 18521, - "goldberg": 18522, - "provost": 18523, - "campaigning": 18524, - "lim": 18525, - "piles": 18526, - "ironically": 18527, - "jong": 18528, - "mohan": 18529, - "successors": 18530, - "usaf": 18531, - "##tem": 18532, - "##ught": 18533, - "autobiographical": 18534, - "haute": 18535, - "preserves": 18536, - "##ending": 18537, - "acquitted": 18538, - "comparisons": 18539, - "203": 18540, - "hydroelectric": 18541, - "gangs": 18542, - "cypriot": 18543, - "torpedoes": 18544, - "rushes": 18545, - "chrome": 18546, - "derive": 18547, - "bumps": 18548, - "instability": 18549, - "fiat": 18550, - "pets": 18551, - "##mbe": 18552, - "silas": 18553, - "dye": 18554, - "reckless": 18555, - "settler": 18556, - "##itation": 18557, - "info": 18558, - "heats": 18559, - "##writing": 18560, - "176": 18561, - "canonical": 18562, - "maltese": 18563, - "fins": 18564, - "mushroom": 18565, - "stacy": 18566, - "aspen": 18567, - "avid": 18568, - "##kur": 18569, - "##loading": 18570, - "vickers": 18571, - "gaston": 18572, - "hillside": 18573, - "statutes": 18574, - "wilde": 18575, - "gail": 18576, - "kung": 18577, - "sabine": 18578, - "comfortably": 18579, - "motorcycles": 18580, - "##rgo": 18581, - "169": 18582, - "pneumonia": 18583, - "fetch": 18584, - "##sonic": 18585, - "axel": 18586, - "faintly": 18587, - "parallels": 18588, - "##oop": 18589, - "mclaren": 18590, - "spouse": 18591, - "compton": 18592, - "interdisciplinary": 18593, - "miner": 18594, - "##eni": 18595, - "181": 18596, - "clamped": 18597, - "##chal": 18598, - "##llah": 18599, - "separates": 18600, - "versa": 18601, - "##mler": 18602, - "scarborough": 18603, - "labrador": 
18604, - "##lity": 18605, - "##osing": 18606, - "rutgers": 18607, - "hurdles": 18608, - "como": 18609, - "166": 18610, - "burt": 18611, - "divers": 18612, - "##100": 18613, - "wichita": 18614, - "cade": 18615, - "coincided": 18616, - "##erson": 18617, - "bruised": 18618, - "mla": 18619, - "##pper": 18620, - "vineyard": 18621, - "##ili": 18622, - "##brush": 18623, - "notch": 18624, - "mentioning": 18625, - "jase": 18626, - "hearted": 18627, - "kits": 18628, - "doe": 18629, - "##acle": 18630, - "pomerania": 18631, - "##ady": 18632, - "ronan": 18633, - "seizure": 18634, - "pavel": 18635, - "problematic": 18636, - "##zaki": 18637, - "domenico": 18638, - "##ulin": 18639, - "catering": 18640, - "penelope": 18641, - "dependence": 18642, - "parental": 18643, - "emilio": 18644, - "ministerial": 18645, - "atkinson": 18646, - "##bolic": 18647, - "clarkson": 18648, - "chargers": 18649, - "colby": 18650, - "grill": 18651, - "peeked": 18652, - "arises": 18653, - "summon": 18654, - "##aged": 18655, - "fools": 18656, - "##grapher": 18657, - "faculties": 18658, - "qaeda": 18659, - "##vial": 18660, - "garner": 18661, - "refurbished": 18662, - "##hwa": 18663, - "geelong": 18664, - "disasters": 18665, - "nudged": 18666, - "bs": 18667, - "shareholder": 18668, - "lori": 18669, - "algae": 18670, - "reinstated": 18671, - "rot": 18672, - "##ades": 18673, - "##nous": 18674, - "invites": 18675, - "stainless": 18676, - "183": 18677, - "inclusive": 18678, - "##itude": 18679, - "diocesan": 18680, - "til": 18681, - "##icz": 18682, - "denomination": 18683, - "##xa": 18684, - "benton": 18685, - "floral": 18686, - "registers": 18687, - "##ider": 18688, - "##erman": 18689, - "##kell": 18690, - "absurd": 18691, - "brunei": 18692, - "guangzhou": 18693, - "hitter": 18694, - "retaliation": 18695, - "##uled": 18696, - "##eve": 18697, - "blanc": 18698, - "nh": 18699, - "consistency": 18700, - "contamination": 18701, - "##eres": 18702, - "##rner": 18703, - "dire": 18704, - "palermo": 18705, - "broadcasters": 18706, - "diaries": 18707, - "inspire": 18708, - "vols": 18709, - "brewer": 18710, - "tightening": 18711, - "ky": 18712, - "mixtape": 18713, - "hormone": 18714, - "##tok": 18715, - "stokes": 18716, - "##color": 18717, - "##dly": 18718, - "##ssi": 18719, - "pg": 18720, - "##ometer": 18721, - "##lington": 18722, - "sanitation": 18723, - "##tility": 18724, - "intercontinental": 18725, - "apps": 18726, - "##adt": 18727, - "¹⁄₂": 18728, - "cylinders": 18729, - "economies": 18730, - "favourable": 18731, - "unison": 18732, - "croix": 18733, - "gertrude": 18734, - "odyssey": 18735, - "vanity": 18736, - "dangling": 18737, - "##logists": 18738, - "upgrades": 18739, - "dice": 18740, - "middleweight": 18741, - "practitioner": 18742, - "##ight": 18743, - "206": 18744, - "henrik": 18745, - "parlor": 18746, - "orion": 18747, - "angered": 18748, - "lac": 18749, - "python": 18750, - "blurted": 18751, - "##rri": 18752, - "sensual": 18753, - "intends": 18754, - "swings": 18755, - "angled": 18756, - "##phs": 18757, - "husky": 18758, - "attain": 18759, - "peerage": 18760, - "precinct": 18761, - "textiles": 18762, - "cheltenham": 18763, - "shuffled": 18764, - "dai": 18765, - "confess": 18766, - "tasting": 18767, - "bhutan": 18768, - "##riation": 18769, - "tyrone": 18770, - "segregation": 18771, - "abrupt": 18772, - "ruiz": 18773, - "##rish": 18774, - "smirked": 18775, - "blackwell": 18776, - "confidential": 18777, - "browning": 18778, - "amounted": 18779, - "##put": 18780, - "vase": 18781, - "scarce": 18782, - "fabulous": 18783, - "raided": 18784, 
- "staple": 18785, - "guyana": 18786, - "unemployed": 18787, - "glider": 18788, - "shay": 18789, - "##tow": 18790, - "carmine": 18791, - "troll": 18792, - "intervene": 18793, - "squash": 18794, - "superstar": 18795, - "##uce": 18796, - "cylindrical": 18797, - "len": 18798, - "roadway": 18799, - "researched": 18800, - "handy": 18801, - "##rium": 18802, - "##jana": 18803, - "meta": 18804, - "lao": 18805, - "declares": 18806, - "##rring": 18807, - "##tadt": 18808, - "##elin": 18809, - "##kova": 18810, - "willem": 18811, - "shrubs": 18812, - "napoleonic": 18813, - "realms": 18814, - "skater": 18815, - "qi": 18816, - "volkswagen": 18817, - "##ł": 18818, - "tad": 18819, - "hara": 18820, - "archaeologist": 18821, - "awkwardly": 18822, - "eerie": 18823, - "##kind": 18824, - "wiley": 18825, - "##heimer": 18826, - "##24": 18827, - "titus": 18828, - "organizers": 18829, - "cfl": 18830, - "crusaders": 18831, - "lama": 18832, - "usb": 18833, - "vent": 18834, - "enraged": 18835, - "thankful": 18836, - "occupants": 18837, - "maximilian": 18838, - "##gaard": 18839, - "possessing": 18840, - "textbooks": 18841, - "##oran": 18842, - "collaborator": 18843, - "quaker": 18844, - "##ulo": 18845, - "avalanche": 18846, - "mono": 18847, - "silky": 18848, - "straits": 18849, - "isaiah": 18850, - "mustang": 18851, - "surged": 18852, - "resolutions": 18853, - "potomac": 18854, - "descend": 18855, - "cl": 18856, - "kilograms": 18857, - "plato": 18858, - "strains": 18859, - "saturdays": 18860, - "##olin": 18861, - "bernstein": 18862, - "##ype": 18863, - "holstein": 18864, - "ponytail": 18865, - "##watch": 18866, - "belize": 18867, - "conversely": 18868, - "heroine": 18869, - "perpetual": 18870, - "##ylus": 18871, - "charcoal": 18872, - "piedmont": 18873, - "glee": 18874, - "negotiating": 18875, - "backdrop": 18876, - "prologue": 18877, - "##jah": 18878, - "##mmy": 18879, - "pasadena": 18880, - "climbs": 18881, - "ramos": 18882, - "sunni": 18883, - "##holm": 18884, - "##tner": 18885, - "##tri": 18886, - "anand": 18887, - "deficiency": 18888, - "hertfordshire": 18889, - "stout": 18890, - "##avi": 18891, - "aperture": 18892, - "orioles": 18893, - "##irs": 18894, - "doncaster": 18895, - "intrigued": 18896, - "bombed": 18897, - "coating": 18898, - "otis": 18899, - "##mat": 18900, - "cocktail": 18901, - "##jit": 18902, - "##eto": 18903, - "amir": 18904, - "arousal": 18905, - "sar": 18906, - "##proof": 18907, - "##act": 18908, - "##ories": 18909, - "dixie": 18910, - "pots": 18911, - "##bow": 18912, - "whereabouts": 18913, - "159": 18914, - "##fted": 18915, - "drains": 18916, - "bullying": 18917, - "cottages": 18918, - "scripture": 18919, - "coherent": 18920, - "fore": 18921, - "poe": 18922, - "appetite": 18923, - "##uration": 18924, - "sampled": 18925, - "##ators": 18926, - "##dp": 18927, - "derrick": 18928, - "rotor": 18929, - "jays": 18930, - "peacock": 18931, - "installment": 18932, - "##rro": 18933, - "advisors": 18934, - "##coming": 18935, - "rodeo": 18936, - "scotch": 18937, - "##mot": 18938, - "##db": 18939, - "##fen": 18940, - "##vant": 18941, - "ensued": 18942, - "rodrigo": 18943, - "dictatorship": 18944, - "martyrs": 18945, - "twenties": 18946, - "##н": 18947, - "towed": 18948, - "incidence": 18949, - "marta": 18950, - "rainforest": 18951, - "sai": 18952, - "scaled": 18953, - "##cles": 18954, - "oceanic": 18955, - "qualifiers": 18956, - "symphonic": 18957, - "mcbride": 18958, - "dislike": 18959, - "generalized": 18960, - "aubrey": 18961, - "colonization": 18962, - "##iation": 18963, - "##lion": 18964, - "##ssing": 
18965, - "disliked": 18966, - "lublin": 18967, - "salesman": 18968, - "##ulates": 18969, - "spherical": 18970, - "whatsoever": 18971, - "sweating": 18972, - "avalon": 18973, - "contention": 18974, - "punt": 18975, - "severity": 18976, - "alderman": 18977, - "atari": 18978, - "##dina": 18979, - "##grant": 18980, - "##rop": 18981, - "scarf": 18982, - "seville": 18983, - "vertices": 18984, - "annexation": 18985, - "fairfield": 18986, - "fascination": 18987, - "inspiring": 18988, - "launches": 18989, - "palatinate": 18990, - "regretted": 18991, - "##rca": 18992, - "feral": 18993, - "##iom": 18994, - "elk": 18995, - "nap": 18996, - "olsen": 18997, - "reddy": 18998, - "yong": 18999, - "##leader": 19000, - "##iae": 19001, - "garment": 19002, - "transports": 19003, - "feng": 19004, - "gracie": 19005, - "outrage": 19006, - "viceroy": 19007, - "insides": 19008, - "##esis": 19009, - "breakup": 19010, - "grady": 19011, - "organizer": 19012, - "softer": 19013, - "grimaced": 19014, - "222": 19015, - "murals": 19016, - "galicia": 19017, - "arranging": 19018, - "vectors": 19019, - "##rsten": 19020, - "bas": 19021, - "##sb": 19022, - "##cens": 19023, - "sloan": 19024, - "##eka": 19025, - "bitten": 19026, - "ara": 19027, - "fender": 19028, - "nausea": 19029, - "bumped": 19030, - "kris": 19031, - "banquet": 19032, - "comrades": 19033, - "detector": 19034, - "persisted": 19035, - "##llan": 19036, - "adjustment": 19037, - "endowed": 19038, - "cinemas": 19039, - "##shot": 19040, - "sellers": 19041, - "##uman": 19042, - "peek": 19043, - "epa": 19044, - "kindly": 19045, - "neglect": 19046, - "simpsons": 19047, - "talon": 19048, - "mausoleum": 19049, - "runaway": 19050, - "hangul": 19051, - "lookout": 19052, - "##cic": 19053, - "rewards": 19054, - "coughed": 19055, - "acquainted": 19056, - "chloride": 19057, - "##ald": 19058, - "quicker": 19059, - "accordion": 19060, - "neolithic": 19061, - "##qa": 19062, - "artemis": 19063, - "coefficient": 19064, - "lenny": 19065, - "pandora": 19066, - "tx": 19067, - "##xed": 19068, - "ecstasy": 19069, - "litter": 19070, - "segunda": 19071, - "chairperson": 19072, - "gemma": 19073, - "hiss": 19074, - "rumor": 19075, - "vow": 19076, - "nasal": 19077, - "antioch": 19078, - "compensate": 19079, - "patiently": 19080, - "transformers": 19081, - "##eded": 19082, - "judo": 19083, - "morrow": 19084, - "penis": 19085, - "posthumous": 19086, - "philips": 19087, - "bandits": 19088, - "husbands": 19089, - "denote": 19090, - "flaming": 19091, - "##any": 19092, - "##phones": 19093, - "langley": 19094, - "yorker": 19095, - "1760": 19096, - "walters": 19097, - "##uo": 19098, - "##kle": 19099, - "gubernatorial": 19100, - "fatty": 19101, - "samsung": 19102, - "leroy": 19103, - "outlaw": 19104, - "##nine": 19105, - "unpublished": 19106, - "poole": 19107, - "jakob": 19108, - "##ᵢ": 19109, - "##ₙ": 19110, - "crete": 19111, - "distorted": 19112, - "superiority": 19113, - "##dhi": 19114, - "intercept": 19115, - "crust": 19116, - "mig": 19117, - "claus": 19118, - "crashes": 19119, - "positioning": 19120, - "188": 19121, - "stallion": 19122, - "301": 19123, - "frontal": 19124, - "armistice": 19125, - "##estinal": 19126, - "elton": 19127, - "aj": 19128, - "encompassing": 19129, - "camel": 19130, - "commemorated": 19131, - "malaria": 19132, - "woodward": 19133, - "calf": 19134, - "cigar": 19135, - "penetrate": 19136, - "##oso": 19137, - "willard": 19138, - "##rno": 19139, - "##uche": 19140, - "illustrate": 19141, - "amusing": 19142, - "convergence": 19143, - "noteworthy": 19144, - "##lma": 19145, - 
"##rva": 19146, - "journeys": 19147, - "realise": 19148, - "manfred": 19149, - "##sable": 19150, - "410": 19151, - "##vocation": 19152, - "hearings": 19153, - "fiance": 19154, - "##posed": 19155, - "educators": 19156, - "provoked": 19157, - "adjusting": 19158, - "##cturing": 19159, - "modular": 19160, - "stockton": 19161, - "paterson": 19162, - "vlad": 19163, - "rejects": 19164, - "electors": 19165, - "selena": 19166, - "maureen": 19167, - "##tres": 19168, - "uber": 19169, - "##rce": 19170, - "swirled": 19171, - "##num": 19172, - "proportions": 19173, - "nanny": 19174, - "pawn": 19175, - "naturalist": 19176, - "parma": 19177, - "apostles": 19178, - "awoke": 19179, - "ethel": 19180, - "wen": 19181, - "##bey": 19182, - "monsoon": 19183, - "overview": 19184, - "##inating": 19185, - "mccain": 19186, - "rendition": 19187, - "risky": 19188, - "adorned": 19189, - "##ih": 19190, - "equestrian": 19191, - "germain": 19192, - "nj": 19193, - "conspicuous": 19194, - "confirming": 19195, - "##yoshi": 19196, - "shivering": 19197, - "##imeter": 19198, - "milestone": 19199, - "rumours": 19200, - "flinched": 19201, - "bounds": 19202, - "smacked": 19203, - "token": 19204, - "##bei": 19205, - "lectured": 19206, - "automobiles": 19207, - "##shore": 19208, - "impacted": 19209, - "##iable": 19210, - "nouns": 19211, - "nero": 19212, - "##leaf": 19213, - "ismail": 19214, - "prostitute": 19215, - "trams": 19216, - "##lace": 19217, - "bridget": 19218, - "sud": 19219, - "stimulus": 19220, - "impressions": 19221, - "reins": 19222, - "revolves": 19223, - "##oud": 19224, - "##gned": 19225, - "giro": 19226, - "honeymoon": 19227, - "##swell": 19228, - "criterion": 19229, - "##sms": 19230, - "##uil": 19231, - "libyan": 19232, - "prefers": 19233, - "##osition": 19234, - "211": 19235, - "preview": 19236, - "sucks": 19237, - "accusation": 19238, - "bursts": 19239, - "metaphor": 19240, - "diffusion": 19241, - "tolerate": 19242, - "faye": 19243, - "betting": 19244, - "cinematographer": 19245, - "liturgical": 19246, - "specials": 19247, - "bitterly": 19248, - "humboldt": 19249, - "##ckle": 19250, - "flux": 19251, - "rattled": 19252, - "##itzer": 19253, - "archaeologists": 19254, - "odor": 19255, - "authorised": 19256, - "marshes": 19257, - "discretion": 19258, - "##ов": 19259, - "alarmed": 19260, - "archaic": 19261, - "inverse": 19262, - "##leton": 19263, - "explorers": 19264, - "##pine": 19265, - "drummond": 19266, - "tsunami": 19267, - "woodlands": 19268, - "##minate": 19269, - "##tland": 19270, - "booklet": 19271, - "insanity": 19272, - "owning": 19273, - "insert": 19274, - "crafted": 19275, - "calculus": 19276, - "##tore": 19277, - "receivers": 19278, - "##bt": 19279, - "stung": 19280, - "##eca": 19281, - "##nched": 19282, - "prevailing": 19283, - "travellers": 19284, - "eyeing": 19285, - "lila": 19286, - "graphs": 19287, - "##borne": 19288, - "178": 19289, - "julien": 19290, - "##won": 19291, - "morale": 19292, - "adaptive": 19293, - "therapist": 19294, - "erica": 19295, - "cw": 19296, - "libertarian": 19297, - "bowman": 19298, - "pitches": 19299, - "vita": 19300, - "##ional": 19301, - "crook": 19302, - "##ads": 19303, - "##entation": 19304, - "caledonia": 19305, - "mutiny": 19306, - "##sible": 19307, - "1840s": 19308, - "automation": 19309, - "##ß": 19310, - "flock": 19311, - "##pia": 19312, - "ironic": 19313, - "pathology": 19314, - "##imus": 19315, - "remarried": 19316, - "##22": 19317, - "joker": 19318, - "withstand": 19319, - "energies": 19320, - "##att": 19321, - "shropshire": 19322, - "hostages": 19323, - 
"madeleine": 19324, - "tentatively": 19325, - "conflicting": 19326, - "mateo": 19327, - "recipes": 19328, - "euros": 19329, - "ol": 19330, - "mercenaries": 19331, - "nico": 19332, - "##ndon": 19333, - "albuquerque": 19334, - "augmented": 19335, - "mythical": 19336, - "bel": 19337, - "freud": 19338, - "##child": 19339, - "cough": 19340, - "##lica": 19341, - "365": 19342, - "freddy": 19343, - "lillian": 19344, - "genetically": 19345, - "nuremberg": 19346, - "calder": 19347, - "209": 19348, - "bonn": 19349, - "outdoors": 19350, - "paste": 19351, - "suns": 19352, - "urgency": 19353, - "vin": 19354, - "restraint": 19355, - "tyson": 19356, - "##cera": 19357, - "##selle": 19358, - "barrage": 19359, - "bethlehem": 19360, - "kahn": 19361, - "##par": 19362, - "mounts": 19363, - "nippon": 19364, - "barony": 19365, - "happier": 19366, - "ryu": 19367, - "makeshift": 19368, - "sheldon": 19369, - "blushed": 19370, - "castillo": 19371, - "barking": 19372, - "listener": 19373, - "taped": 19374, - "bethel": 19375, - "fluent": 19376, - "headlines": 19377, - "pornography": 19378, - "rum": 19379, - "disclosure": 19380, - "sighing": 19381, - "mace": 19382, - "doubling": 19383, - "gunther": 19384, - "manly": 19385, - "##plex": 19386, - "rt": 19387, - "interventions": 19388, - "physiological": 19389, - "forwards": 19390, - "emerges": 19391, - "##tooth": 19392, - "##gny": 19393, - "compliment": 19394, - "rib": 19395, - "recession": 19396, - "visibly": 19397, - "barge": 19398, - "faults": 19399, - "connector": 19400, - "exquisite": 19401, - "prefect": 19402, - "##rlin": 19403, - "patio": 19404, - "##cured": 19405, - "elevators": 19406, - "brandt": 19407, - "italics": 19408, - "pena": 19409, - "173": 19410, - "wasp": 19411, - "satin": 19412, - "ea": 19413, - "botswana": 19414, - "graceful": 19415, - "respectable": 19416, - "##jima": 19417, - "##rter": 19418, - "##oic": 19419, - "franciscan": 19420, - "generates": 19421, - "##dl": 19422, - "alfredo": 19423, - "disgusting": 19424, - "##olate": 19425, - "##iously": 19426, - "sherwood": 19427, - "warns": 19428, - "cod": 19429, - "promo": 19430, - "cheryl": 19431, - "sino": 19432, - "##ة": 19433, - "##escu": 19434, - "twitch": 19435, - "##zhi": 19436, - "brownish": 19437, - "thom": 19438, - "ortiz": 19439, - "##dron": 19440, - "densely": 19441, - "##beat": 19442, - "carmel": 19443, - "reinforce": 19444, - "##bana": 19445, - "187": 19446, - "anastasia": 19447, - "downhill": 19448, - "vertex": 19449, - "contaminated": 19450, - "remembrance": 19451, - "harmonic": 19452, - "homework": 19453, - "##sol": 19454, - "fiancee": 19455, - "gears": 19456, - "olds": 19457, - "angelica": 19458, - "loft": 19459, - "ramsay": 19460, - "quiz": 19461, - "colliery": 19462, - "sevens": 19463, - "##cape": 19464, - "autism": 19465, - "##hil": 19466, - "walkway": 19467, - "##boats": 19468, - "ruben": 19469, - "abnormal": 19470, - "ounce": 19471, - "khmer": 19472, - "##bbe": 19473, - "zachary": 19474, - "bedside": 19475, - "morphology": 19476, - "punching": 19477, - "##olar": 19478, - "sparrow": 19479, - "convinces": 19480, - "##35": 19481, - "hewitt": 19482, - "queer": 19483, - "remastered": 19484, - "rods": 19485, - "mabel": 19486, - "solemn": 19487, - "notified": 19488, - "lyricist": 19489, - "symmetric": 19490, - "##xide": 19491, - "174": 19492, - "encore": 19493, - "passports": 19494, - "wildcats": 19495, - "##uni": 19496, - "baja": 19497, - "##pac": 19498, - "mildly": 19499, - "##ease": 19500, - "bleed": 19501, - "commodity": 19502, - "mounds": 19503, - "glossy": 19504, - "orchestras": 
19505, - "##omo": 19506, - "damian": 19507, - "prelude": 19508, - "ambitions": 19509, - "##vet": 19510, - "awhile": 19511, - "remotely": 19512, - "##aud": 19513, - "asserts": 19514, - "imply": 19515, - "##iques": 19516, - "distinctly": 19517, - "modelling": 19518, - "remedy": 19519, - "##dded": 19520, - "windshield": 19521, - "dani": 19522, - "xiao": 19523, - "##endra": 19524, - "audible": 19525, - "powerplant": 19526, - "1300": 19527, - "invalid": 19528, - "elemental": 19529, - "acquisitions": 19530, - "##hala": 19531, - "immaculate": 19532, - "libby": 19533, - "plata": 19534, - "smuggling": 19535, - "ventilation": 19536, - "denoted": 19537, - "minh": 19538, - "##morphism": 19539, - "430": 19540, - "differed": 19541, - "dion": 19542, - "kelley": 19543, - "lore": 19544, - "mocking": 19545, - "sabbath": 19546, - "spikes": 19547, - "hygiene": 19548, - "drown": 19549, - "runoff": 19550, - "stylized": 19551, - "tally": 19552, - "liberated": 19553, - "aux": 19554, - "interpreter": 19555, - "righteous": 19556, - "aba": 19557, - "siren": 19558, - "reaper": 19559, - "pearce": 19560, - "millie": 19561, - "##cier": 19562, - "##yra": 19563, - "gaius": 19564, - "##iso": 19565, - "captures": 19566, - "##ttering": 19567, - "dorm": 19568, - "claudio": 19569, - "##sic": 19570, - "benches": 19571, - "knighted": 19572, - "blackness": 19573, - "##ored": 19574, - "discount": 19575, - "fumble": 19576, - "oxidation": 19577, - "routed": 19578, - "##ς": 19579, - "novak": 19580, - "perpendicular": 19581, - "spoiled": 19582, - "fracture": 19583, - "splits": 19584, - "##urt": 19585, - "pads": 19586, - "topology": 19587, - "##cats": 19588, - "axes": 19589, - "fortunate": 19590, - "offenders": 19591, - "protestants": 19592, - "esteem": 19593, - "221": 19594, - "broadband": 19595, - "convened": 19596, - "frankly": 19597, - "hound": 19598, - "prototypes": 19599, - "isil": 19600, - "facilitated": 19601, - "keel": 19602, - "##sher": 19603, - "sahara": 19604, - "awaited": 19605, - "bubba": 19606, - "orb": 19607, - "prosecutors": 19608, - "186": 19609, - "hem": 19610, - "520": 19611, - "##xing": 19612, - "relaxing": 19613, - "remnant": 19614, - "romney": 19615, - "sorted": 19616, - "slalom": 19617, - "stefano": 19618, - "ulrich": 19619, - "##active": 19620, - "exemption": 19621, - "folder": 19622, - "pauses": 19623, - "foliage": 19624, - "hitchcock": 19625, - "epithet": 19626, - "204": 19627, - "criticisms": 19628, - "##aca": 19629, - "ballistic": 19630, - "brody": 19631, - "hinduism": 19632, - "chaotic": 19633, - "youths": 19634, - "equals": 19635, - "##pala": 19636, - "pts": 19637, - "thicker": 19638, - "analogous": 19639, - "capitalist": 19640, - "improvised": 19641, - "overseeing": 19642, - "sinatra": 19643, - "ascended": 19644, - "beverage": 19645, - "##tl": 19646, - "straightforward": 19647, - "##kon": 19648, - "curran": 19649, - "##west": 19650, - "bois": 19651, - "325": 19652, - "induce": 19653, - "surveying": 19654, - "emperors": 19655, - "sax": 19656, - "unpopular": 19657, - "##kk": 19658, - "cartoonist": 19659, - "fused": 19660, - "##mble": 19661, - "unto": 19662, - "##yuki": 19663, - "localities": 19664, - "##cko": 19665, - "##ln": 19666, - "darlington": 19667, - "slain": 19668, - "academie": 19669, - "lobbying": 19670, - "sediment": 19671, - "puzzles": 19672, - "##grass": 19673, - "defiance": 19674, - "dickens": 19675, - "manifest": 19676, - "tongues": 19677, - "alumnus": 19678, - "arbor": 19679, - "coincide": 19680, - "184": 19681, - "appalachian": 19682, - "mustafa": 19683, - "examiner": 19684, - "cabaret": 
19685, - "traumatic": 19686, - "yves": 19687, - "bracelet": 19688, - "draining": 19689, - "heroin": 19690, - "magnum": 19691, - "baths": 19692, - "odessa": 19693, - "consonants": 19694, - "mitsubishi": 19695, - "##gua": 19696, - "kellan": 19697, - "vaudeville": 19698, - "##fr": 19699, - "joked": 19700, - "null": 19701, - "straps": 19702, - "probation": 19703, - "##ław": 19704, - "ceded": 19705, - "interfaces": 19706, - "##pas": 19707, - "##zawa": 19708, - "blinding": 19709, - "viet": 19710, - "224": 19711, - "rothschild": 19712, - "museo": 19713, - "640": 19714, - "huddersfield": 19715, - "##vr": 19716, - "tactic": 19717, - "##storm": 19718, - "brackets": 19719, - "dazed": 19720, - "incorrectly": 19721, - "##vu": 19722, - "reg": 19723, - "glazed": 19724, - "fearful": 19725, - "manifold": 19726, - "benefited": 19727, - "irony": 19728, - "##sun": 19729, - "stumbling": 19730, - "##rte": 19731, - "willingness": 19732, - "balkans": 19733, - "mei": 19734, - "wraps": 19735, - "##aba": 19736, - "injected": 19737, - "##lea": 19738, - "gu": 19739, - "syed": 19740, - "harmless": 19741, - "##hammer": 19742, - "bray": 19743, - "takeoff": 19744, - "poppy": 19745, - "timor": 19746, - "cardboard": 19747, - "astronaut": 19748, - "purdue": 19749, - "weeping": 19750, - "southbound": 19751, - "cursing": 19752, - "stalls": 19753, - "diagonal": 19754, - "##neer": 19755, - "lamar": 19756, - "bryce": 19757, - "comte": 19758, - "weekdays": 19759, - "harrington": 19760, - "##uba": 19761, - "negatively": 19762, - "##see": 19763, - "lays": 19764, - "grouping": 19765, - "##cken": 19766, - "##henko": 19767, - "affirmed": 19768, - "halle": 19769, - "modernist": 19770, - "##lai": 19771, - "hodges": 19772, - "smelling": 19773, - "aristocratic": 19774, - "baptized": 19775, - "dismiss": 19776, - "justification": 19777, - "oilers": 19778, - "##now": 19779, - "coupling": 19780, - "qin": 19781, - "snack": 19782, - "healer": 19783, - "##qing": 19784, - "gardener": 19785, - "layla": 19786, - "battled": 19787, - "formulated": 19788, - "stephenson": 19789, - "gravitational": 19790, - "##gill": 19791, - "##jun": 19792, - "1768": 19793, - "granny": 19794, - "coordinating": 19795, - "suites": 19796, - "##cd": 19797, - "##ioned": 19798, - "monarchs": 19799, - "##cote": 19800, - "##hips": 19801, - "sep": 19802, - "blended": 19803, - "apr": 19804, - "barrister": 19805, - "deposition": 19806, - "fia": 19807, - "mina": 19808, - "policemen": 19809, - "paranoid": 19810, - "##pressed": 19811, - "churchyard": 19812, - "covert": 19813, - "crumpled": 19814, - "creep": 19815, - "abandoning": 19816, - "tr": 19817, - "transmit": 19818, - "conceal": 19819, - "barr": 19820, - "understands": 19821, - "readiness": 19822, - "spire": 19823, - "##cology": 19824, - "##enia": 19825, - "##erry": 19826, - "610": 19827, - "startling": 19828, - "unlock": 19829, - "vida": 19830, - "bowled": 19831, - "slots": 19832, - "##nat": 19833, - "##islav": 19834, - "spaced": 19835, - "trusting": 19836, - "admire": 19837, - "rig": 19838, - "##ink": 19839, - "slack": 19840, - "##70": 19841, - "mv": 19842, - "207": 19843, - "casualty": 19844, - "##wei": 19845, - "classmates": 19846, - "##odes": 19847, - "##rar": 19848, - "##rked": 19849, - "amherst": 19850, - "furnished": 19851, - "evolve": 19852, - "foundry": 19853, - "menace": 19854, - "mead": 19855, - "##lein": 19856, - "flu": 19857, - "wesleyan": 19858, - "##kled": 19859, - "monterey": 19860, - "webber": 19861, - "##vos": 19862, - "wil": 19863, - "##mith": 19864, - "##на": 19865, - "bartholomew": 19866, - "justices": 
19867, - "restrained": 19868, - "##cke": 19869, - "amenities": 19870, - "191": 19871, - "mediated": 19872, - "sewage": 19873, - "trenches": 19874, - "ml": 19875, - "mainz": 19876, - "##thus": 19877, - "1800s": 19878, - "##cula": 19879, - "##inski": 19880, - "caine": 19881, - "bonding": 19882, - "213": 19883, - "converts": 19884, - "spheres": 19885, - "superseded": 19886, - "marianne": 19887, - "crypt": 19888, - "sweaty": 19889, - "ensign": 19890, - "historia": 19891, - "##br": 19892, - "spruce": 19893, - "##post": 19894, - "##ask": 19895, - "forks": 19896, - "thoughtfully": 19897, - "yukon": 19898, - "pamphlet": 19899, - "ames": 19900, - "##uter": 19901, - "karma": 19902, - "##yya": 19903, - "bryn": 19904, - "negotiation": 19905, - "sighs": 19906, - "incapable": 19907, - "##mbre": 19908, - "##ntial": 19909, - "actresses": 19910, - "taft": 19911, - "##mill": 19912, - "luce": 19913, - "prevailed": 19914, - "##amine": 19915, - "1773": 19916, - "motionless": 19917, - "envoy": 19918, - "testify": 19919, - "investing": 19920, - "sculpted": 19921, - "instructors": 19922, - "provence": 19923, - "kali": 19924, - "cullen": 19925, - "horseback": 19926, - "##while": 19927, - "goodwin": 19928, - "##jos": 19929, - "gaa": 19930, - "norte": 19931, - "##ldon": 19932, - "modify": 19933, - "wavelength": 19934, - "abd": 19935, - "214": 19936, - "skinned": 19937, - "sprinter": 19938, - "forecast": 19939, - "scheduling": 19940, - "marries": 19941, - "squared": 19942, - "tentative": 19943, - "##chman": 19944, - "boer": 19945, - "##isch": 19946, - "bolts": 19947, - "swap": 19948, - "fisherman": 19949, - "assyrian": 19950, - "impatiently": 19951, - "guthrie": 19952, - "martins": 19953, - "murdoch": 19954, - "194": 19955, - "tanya": 19956, - "nicely": 19957, - "dolly": 19958, - "lacy": 19959, - "med": 19960, - "##45": 19961, - "syn": 19962, - "decks": 19963, - "fashionable": 19964, - "millionaire": 19965, - "##ust": 19966, - "surfing": 19967, - "##ml": 19968, - "##ision": 19969, - "heaved": 19970, - "tammy": 19971, - "consulate": 19972, - "attendees": 19973, - "routinely": 19974, - "197": 19975, - "fuse": 19976, - "saxophonist": 19977, - "backseat": 19978, - "malaya": 19979, - "##lord": 19980, - "scowl": 19981, - "tau": 19982, - "##ishly": 19983, - "193": 19984, - "sighted": 19985, - "steaming": 19986, - "##rks": 19987, - "303": 19988, - "911": 19989, - "##holes": 19990, - "##hong": 19991, - "ching": 19992, - "##wife": 19993, - "bless": 19994, - "conserved": 19995, - "jurassic": 19996, - "stacey": 19997, - "unix": 19998, - "zion": 19999, - "chunk": 20000, - "rigorous": 20001, - "blaine": 20002, - "198": 20003, - "peabody": 20004, - "slayer": 20005, - "dismay": 20006, - "brewers": 20007, - "nz": 20008, - "##jer": 20009, - "det": 20010, - "##glia": 20011, - "glover": 20012, - "postwar": 20013, - "int": 20014, - "penetration": 20015, - "sylvester": 20016, - "imitation": 20017, - "vertically": 20018, - "airlift": 20019, - "heiress": 20020, - "knoxville": 20021, - "viva": 20022, - "##uin": 20023, - "390": 20024, - "macon": 20025, - "##rim": 20026, - "##fighter": 20027, - "##gonal": 20028, - "janice": 20029, - "##orescence": 20030, - "##wari": 20031, - "marius": 20032, - "belongings": 20033, - "leicestershire": 20034, - "196": 20035, - "blanco": 20036, - "inverted": 20037, - "preseason": 20038, - "sanity": 20039, - "sobbing": 20040, - "##due": 20041, - "##elt": 20042, - "##dled": 20043, - "collingwood": 20044, - "regeneration": 20045, - "flickering": 20046, - "shortest": 20047, - "##mount": 20048, - "##osi": 20049, - 
"feminism": 20050, - "##lat": 20051, - "sherlock": 20052, - "cabinets": 20053, - "fumbled": 20054, - "northbound": 20055, - "precedent": 20056, - "snaps": 20057, - "##mme": 20058, - "researching": 20059, - "##akes": 20060, - "guillaume": 20061, - "insights": 20062, - "manipulated": 20063, - "vapor": 20064, - "neighbour": 20065, - "sap": 20066, - "gangster": 20067, - "frey": 20068, - "f1": 20069, - "stalking": 20070, - "scarcely": 20071, - "callie": 20072, - "barnett": 20073, - "tendencies": 20074, - "audi": 20075, - "doomed": 20076, - "assessing": 20077, - "slung": 20078, - "panchayat": 20079, - "ambiguous": 20080, - "bartlett": 20081, - "##etto": 20082, - "distributing": 20083, - "violating": 20084, - "wolverhampton": 20085, - "##hetic": 20086, - "swami": 20087, - "histoire": 20088, - "##urus": 20089, - "liable": 20090, - "pounder": 20091, - "groin": 20092, - "hussain": 20093, - "larsen": 20094, - "popping": 20095, - "surprises": 20096, - "##atter": 20097, - "vie": 20098, - "curt": 20099, - "##station": 20100, - "mute": 20101, - "relocate": 20102, - "musicals": 20103, - "authorization": 20104, - "richter": 20105, - "##sef": 20106, - "immortality": 20107, - "tna": 20108, - "bombings": 20109, - "##press": 20110, - "deteriorated": 20111, - "yiddish": 20112, - "##acious": 20113, - "robbed": 20114, - "colchester": 20115, - "cs": 20116, - "pmid": 20117, - "ao": 20118, - "verified": 20119, - "balancing": 20120, - "apostle": 20121, - "swayed": 20122, - "recognizable": 20123, - "oxfordshire": 20124, - "retention": 20125, - "nottinghamshire": 20126, - "contender": 20127, - "judd": 20128, - "invitational": 20129, - "shrimp": 20130, - "uhf": 20131, - "##icient": 20132, - "cleaner": 20133, - "longitudinal": 20134, - "tanker": 20135, - "##mur": 20136, - "acronym": 20137, - "broker": 20138, - "koppen": 20139, - "sundance": 20140, - "suppliers": 20141, - "##gil": 20142, - "4000": 20143, - "clipped": 20144, - "fuels": 20145, - "petite": 20146, - "##anne": 20147, - "landslide": 20148, - "helene": 20149, - "diversion": 20150, - "populous": 20151, - "landowners": 20152, - "auspices": 20153, - "melville": 20154, - "quantitative": 20155, - "##xes": 20156, - "ferries": 20157, - "nicky": 20158, - "##llus": 20159, - "doo": 20160, - "haunting": 20161, - "roche": 20162, - "carver": 20163, - "downed": 20164, - "unavailable": 20165, - "##pathy": 20166, - "approximation": 20167, - "hiroshima": 20168, - "##hue": 20169, - "garfield": 20170, - "valle": 20171, - "comparatively": 20172, - "keyboardist": 20173, - "traveler": 20174, - "##eit": 20175, - "congestion": 20176, - "calculating": 20177, - "subsidiaries": 20178, - "##bate": 20179, - "serb": 20180, - "modernization": 20181, - "fairies": 20182, - "deepened": 20183, - "ville": 20184, - "averages": 20185, - "##lore": 20186, - "inflammatory": 20187, - "tonga": 20188, - "##itch": 20189, - "co₂": 20190, - "squads": 20191, - "##hea": 20192, - "gigantic": 20193, - "serum": 20194, - "enjoyment": 20195, - "retailer": 20196, - "verona": 20197, - "35th": 20198, - "cis": 20199, - "##phobic": 20200, - "magna": 20201, - "technicians": 20202, - "##vati": 20203, - "arithmetic": 20204, - "##sport": 20205, - "levin": 20206, - "##dation": 20207, - "amtrak": 20208, - "chow": 20209, - "sienna": 20210, - "##eyer": 20211, - "backstage": 20212, - "entrepreneurship": 20213, - "##otic": 20214, - "learnt": 20215, - "tao": 20216, - "##udy": 20217, - "worcestershire": 20218, - "formulation": 20219, - "baggage": 20220, - "hesitant": 20221, - "bali": 20222, - "sabotage": 20223, - "##kari": 20224, - 
"barren": 20225, - "enhancing": 20226, - "murmur": 20227, - "pl": 20228, - "freshly": 20229, - "putnam": 20230, - "syntax": 20231, - "aces": 20232, - "medicines": 20233, - "resentment": 20234, - "bandwidth": 20235, - "##sier": 20236, - "grins": 20237, - "chili": 20238, - "guido": 20239, - "##sei": 20240, - "framing": 20241, - "implying": 20242, - "gareth": 20243, - "lissa": 20244, - "genevieve": 20245, - "pertaining": 20246, - "admissions": 20247, - "geo": 20248, - "thorpe": 20249, - "proliferation": 20250, - "sato": 20251, - "bela": 20252, - "analyzing": 20253, - "parting": 20254, - "##gor": 20255, - "awakened": 20256, - "##isman": 20257, - "huddled": 20258, - "secrecy": 20259, - "##kling": 20260, - "hush": 20261, - "gentry": 20262, - "540": 20263, - "dungeons": 20264, - "##ego": 20265, - "coasts": 20266, - "##utz": 20267, - "sacrificed": 20268, - "##chule": 20269, - "landowner": 20270, - "mutually": 20271, - "prevalence": 20272, - "programmer": 20273, - "adolescent": 20274, - "disrupted": 20275, - "seaside": 20276, - "gee": 20277, - "trusts": 20278, - "vamp": 20279, - "georgie": 20280, - "##nesian": 20281, - "##iol": 20282, - "schedules": 20283, - "sindh": 20284, - "##market": 20285, - "etched": 20286, - "hm": 20287, - "sparse": 20288, - "bey": 20289, - "beaux": 20290, - "scratching": 20291, - "gliding": 20292, - "unidentified": 20293, - "216": 20294, - "collaborating": 20295, - "gems": 20296, - "jesuits": 20297, - "oro": 20298, - "accumulation": 20299, - "shaping": 20300, - "mbe": 20301, - "anal": 20302, - "##xin": 20303, - "231": 20304, - "enthusiasts": 20305, - "newscast": 20306, - "##egan": 20307, - "janata": 20308, - "dewey": 20309, - "parkinson": 20310, - "179": 20311, - "ankara": 20312, - "biennial": 20313, - "towering": 20314, - "dd": 20315, - "inconsistent": 20316, - "950": 20317, - "##chet": 20318, - "thriving": 20319, - "terminate": 20320, - "cabins": 20321, - "furiously": 20322, - "eats": 20323, - "advocating": 20324, - "donkey": 20325, - "marley": 20326, - "muster": 20327, - "phyllis": 20328, - "leiden": 20329, - "##user": 20330, - "grassland": 20331, - "glittering": 20332, - "iucn": 20333, - "loneliness": 20334, - "217": 20335, - "memorandum": 20336, - "armenians": 20337, - "##ddle": 20338, - "popularized": 20339, - "rhodesia": 20340, - "60s": 20341, - "lame": 20342, - "##illon": 20343, - "sans": 20344, - "bikini": 20345, - "header": 20346, - "orbits": 20347, - "##xx": 20348, - "##finger": 20349, - "##ulator": 20350, - "sharif": 20351, - "spines": 20352, - "biotechnology": 20353, - "strolled": 20354, - "naughty": 20355, - "yates": 20356, - "##wire": 20357, - "fremantle": 20358, - "milo": 20359, - "##mour": 20360, - "abducted": 20361, - "removes": 20362, - "##atin": 20363, - "humming": 20364, - "wonderland": 20365, - "##chrome": 20366, - "##ester": 20367, - "hume": 20368, - "pivotal": 20369, - "##rates": 20370, - "armand": 20371, - "grams": 20372, - "believers": 20373, - "elector": 20374, - "rte": 20375, - "apron": 20376, - "bis": 20377, - "scraped": 20378, - "##yria": 20379, - "endorsement": 20380, - "initials": 20381, - "##llation": 20382, - "eps": 20383, - "dotted": 20384, - "hints": 20385, - "buzzing": 20386, - "emigration": 20387, - "nearer": 20388, - "##tom": 20389, - "indicators": 20390, - "##ulu": 20391, - "coarse": 20392, - "neutron": 20393, - "protectorate": 20394, - "##uze": 20395, - "directional": 20396, - "exploits": 20397, - "pains": 20398, - "loire": 20399, - "1830s": 20400, - "proponents": 20401, - "guggenheim": 20402, - "rabbits": 20403, - "ritchie": 20404, 
- "305": 20405, - "hectare": 20406, - "inputs": 20407, - "hutton": 20408, - "##raz": 20409, - "verify": 20410, - "##ako": 20411, - "boilers": 20412, - "longitude": 20413, - "##lev": 20414, - "skeletal": 20415, - "yer": 20416, - "emilia": 20417, - "citrus": 20418, - "compromised": 20419, - "##gau": 20420, - "pokemon": 20421, - "prescription": 20422, - "paragraph": 20423, - "eduard": 20424, - "cadillac": 20425, - "attire": 20426, - "categorized": 20427, - "kenyan": 20428, - "weddings": 20429, - "charley": 20430, - "##bourg": 20431, - "entertain": 20432, - "monmouth": 20433, - "##lles": 20434, - "nutrients": 20435, - "davey": 20436, - "mesh": 20437, - "incentive": 20438, - "practised": 20439, - "ecosystems": 20440, - "kemp": 20441, - "subdued": 20442, - "overheard": 20443, - "##rya": 20444, - "bodily": 20445, - "maxim": 20446, - "##nius": 20447, - "apprenticeship": 20448, - "ursula": 20449, - "##fight": 20450, - "lodged": 20451, - "rug": 20452, - "silesian": 20453, - "unconstitutional": 20454, - "patel": 20455, - "inspected": 20456, - "coyote": 20457, - "unbeaten": 20458, - "##hak": 20459, - "34th": 20460, - "disruption": 20461, - "convict": 20462, - "parcel": 20463, - "##cl": 20464, - "##nham": 20465, - "collier": 20466, - "implicated": 20467, - "mallory": 20468, - "##iac": 20469, - "##lab": 20470, - "susannah": 20471, - "winkler": 20472, - "##rber": 20473, - "shia": 20474, - "phelps": 20475, - "sediments": 20476, - "graphical": 20477, - "robotic": 20478, - "##sner": 20479, - "adulthood": 20480, - "mart": 20481, - "smoked": 20482, - "##isto": 20483, - "kathryn": 20484, - "clarified": 20485, - "##aran": 20486, - "divides": 20487, - "convictions": 20488, - "oppression": 20489, - "pausing": 20490, - "burying": 20491, - "##mt": 20492, - "federico": 20493, - "mathias": 20494, - "eileen": 20495, - "##tana": 20496, - "kite": 20497, - "hunched": 20498, - "##acies": 20499, - "189": 20500, - "##atz": 20501, - "disadvantage": 20502, - "liza": 20503, - "kinetic": 20504, - "greedy": 20505, - "paradox": 20506, - "yokohama": 20507, - "dowager": 20508, - "trunks": 20509, - "ventured": 20510, - "##gement": 20511, - "gupta": 20512, - "vilnius": 20513, - "olaf": 20514, - "##thest": 20515, - "crimean": 20516, - "hopper": 20517, - "##ej": 20518, - "progressively": 20519, - "arturo": 20520, - "mouthed": 20521, - "arrondissement": 20522, - "##fusion": 20523, - "rubin": 20524, - "simulcast": 20525, - "oceania": 20526, - "##orum": 20527, - "##stra": 20528, - "##rred": 20529, - "busiest": 20530, - "intensely": 20531, - "navigator": 20532, - "cary": 20533, - "##vine": 20534, - "##hini": 20535, - "##bies": 20536, - "fife": 20537, - "rowe": 20538, - "rowland": 20539, - "posing": 20540, - "insurgents": 20541, - "shafts": 20542, - "lawsuits": 20543, - "activate": 20544, - "conor": 20545, - "inward": 20546, - "culturally": 20547, - "garlic": 20548, - "265": 20549, - "##eering": 20550, - "eclectic": 20551, - "##hui": 20552, - "##kee": 20553, - "##nl": 20554, - "furrowed": 20555, - "vargas": 20556, - "meteorological": 20557, - "rendezvous": 20558, - "##aus": 20559, - "culinary": 20560, - "commencement": 20561, - "##dition": 20562, - "quota": 20563, - "##notes": 20564, - "mommy": 20565, - "salaries": 20566, - "overlapping": 20567, - "mule": 20568, - "##iology": 20569, - "##mology": 20570, - "sums": 20571, - "wentworth": 20572, - "##isk": 20573, - "##zione": 20574, - "mainline": 20575, - "subgroup": 20576, - "##illy": 20577, - "hack": 20578, - "plaintiff": 20579, - "verdi": 20580, - "bulb": 20581, - "differentiation": 20582, - 
"engagements": 20583, - "multinational": 20584, - "supplemented": 20585, - "bertrand": 20586, - "caller": 20587, - "regis": 20588, - "##naire": 20589, - "##sler": 20590, - "##arts": 20591, - "##imated": 20592, - "blossom": 20593, - "propagation": 20594, - "kilometer": 20595, - "viaduct": 20596, - "vineyards": 20597, - "##uate": 20598, - "beckett": 20599, - "optimization": 20600, - "golfer": 20601, - "songwriters": 20602, - "seminal": 20603, - "semitic": 20604, - "thud": 20605, - "volatile": 20606, - "evolving": 20607, - "ridley": 20608, - "##wley": 20609, - "trivial": 20610, - "distributions": 20611, - "scandinavia": 20612, - "jiang": 20613, - "##ject": 20614, - "wrestled": 20615, - "insistence": 20616, - "##dio": 20617, - "emphasizes": 20618, - "napkin": 20619, - "##ods": 20620, - "adjunct": 20621, - "rhyme": 20622, - "##ricted": 20623, - "##eti": 20624, - "hopeless": 20625, - "surrounds": 20626, - "tremble": 20627, - "32nd": 20628, - "smoky": 20629, - "##ntly": 20630, - "oils": 20631, - "medicinal": 20632, - "padded": 20633, - "steer": 20634, - "wilkes": 20635, - "219": 20636, - "255": 20637, - "concessions": 20638, - "hue": 20639, - "uniquely": 20640, - "blinded": 20641, - "landon": 20642, - "yahoo": 20643, - "##lane": 20644, - "hendrix": 20645, - "commemorating": 20646, - "dex": 20647, - "specify": 20648, - "chicks": 20649, - "##ggio": 20650, - "intercity": 20651, - "1400": 20652, - "morley": 20653, - "##torm": 20654, - "highlighting": 20655, - "##oting": 20656, - "pang": 20657, - "oblique": 20658, - "stalled": 20659, - "##liner": 20660, - "flirting": 20661, - "newborn": 20662, - "1769": 20663, - "bishopric": 20664, - "shaved": 20665, - "232": 20666, - "currie": 20667, - "##ush": 20668, - "dharma": 20669, - "spartan": 20670, - "##ooped": 20671, - "favorites": 20672, - "smug": 20673, - "novella": 20674, - "sirens": 20675, - "abusive": 20676, - "creations": 20677, - "espana": 20678, - "##lage": 20679, - "paradigm": 20680, - "semiconductor": 20681, - "sheen": 20682, - "##rdo": 20683, - "##yen": 20684, - "##zak": 20685, - "nrl": 20686, - "renew": 20687, - "##pose": 20688, - "##tur": 20689, - "adjutant": 20690, - "marches": 20691, - "norma": 20692, - "##enity": 20693, - "ineffective": 20694, - "weimar": 20695, - "grunt": 20696, - "##gat": 20697, - "lordship": 20698, - "plotting": 20699, - "expenditure": 20700, - "infringement": 20701, - "lbs": 20702, - "refrain": 20703, - "av": 20704, - "mimi": 20705, - "mistakenly": 20706, - "postmaster": 20707, - "1771": 20708, - "##bara": 20709, - "ras": 20710, - "motorsports": 20711, - "tito": 20712, - "199": 20713, - "subjective": 20714, - "##zza": 20715, - "bully": 20716, - "stew": 20717, - "##kaya": 20718, - "prescott": 20719, - "1a": 20720, - "##raphic": 20721, - "##zam": 20722, - "bids": 20723, - "styling": 20724, - "paranormal": 20725, - "reeve": 20726, - "sneaking": 20727, - "exploding": 20728, - "katz": 20729, - "akbar": 20730, - "migrant": 20731, - "syllables": 20732, - "indefinitely": 20733, - "##ogical": 20734, - "destroys": 20735, - "replaces": 20736, - "applause": 20737, - "##phine": 20738, - "pest": 20739, - "##fide": 20740, - "218": 20741, - "articulated": 20742, - "bertie": 20743, - "##thing": 20744, - "##cars": 20745, - "##ptic": 20746, - "courtroom": 20747, - "crowley": 20748, - "aesthetics": 20749, - "cummings": 20750, - "tehsil": 20751, - "hormones": 20752, - "titanic": 20753, - "dangerously": 20754, - "##ibe": 20755, - "stadion": 20756, - "jaenelle": 20757, - "auguste": 20758, - "ciudad": 20759, - "##chu": 20760, - "mysore": 20761, 
- "partisans": 20762, - "##sio": 20763, - "lucan": 20764, - "philipp": 20765, - "##aly": 20766, - "debating": 20767, - "henley": 20768, - "interiors": 20769, - "##rano": 20770, - "##tious": 20771, - "homecoming": 20772, - "beyonce": 20773, - "usher": 20774, - "henrietta": 20775, - "prepares": 20776, - "weeds": 20777, - "##oman": 20778, - "ely": 20779, - "plucked": 20780, - "##pire": 20781, - "##dable": 20782, - "luxurious": 20783, - "##aq": 20784, - "artifact": 20785, - "password": 20786, - "pasture": 20787, - "juno": 20788, - "maddy": 20789, - "minsk": 20790, - "##dder": 20791, - "##ologies": 20792, - "##rone": 20793, - "assessments": 20794, - "martian": 20795, - "royalist": 20796, - "1765": 20797, - "examines": 20798, - "##mani": 20799, - "##rge": 20800, - "nino": 20801, - "223": 20802, - "parry": 20803, - "scooped": 20804, - "relativity": 20805, - "##eli": 20806, - "##uting": 20807, - "##cao": 20808, - "congregational": 20809, - "noisy": 20810, - "traverse": 20811, - "##agawa": 20812, - "strikeouts": 20813, - "nickelodeon": 20814, - "obituary": 20815, - "transylvania": 20816, - "binds": 20817, - "depictions": 20818, - "polk": 20819, - "trolley": 20820, - "##yed": 20821, - "##lard": 20822, - "breeders": 20823, - "##under": 20824, - "dryly": 20825, - "hokkaido": 20826, - "1762": 20827, - "strengths": 20828, - "stacks": 20829, - "bonaparte": 20830, - "connectivity": 20831, - "neared": 20832, - "prostitutes": 20833, - "stamped": 20834, - "anaheim": 20835, - "gutierrez": 20836, - "sinai": 20837, - "##zzling": 20838, - "bram": 20839, - "fresno": 20840, - "madhya": 20841, - "##86": 20842, - "proton": 20843, - "##lena": 20844, - "##llum": 20845, - "##phon": 20846, - "reelected": 20847, - "wanda": 20848, - "##anus": 20849, - "##lb": 20850, - "ample": 20851, - "distinguishing": 20852, - "##yler": 20853, - "grasping": 20854, - "sermons": 20855, - "tomato": 20856, - "bland": 20857, - "stimulation": 20858, - "avenues": 20859, - "##eux": 20860, - "spreads": 20861, - "scarlett": 20862, - "fern": 20863, - "pentagon": 20864, - "assert": 20865, - "baird": 20866, - "chesapeake": 20867, - "ir": 20868, - "calmed": 20869, - "distortion": 20870, - "fatalities": 20871, - "##olis": 20872, - "correctional": 20873, - "pricing": 20874, - "##astic": 20875, - "##gina": 20876, - "prom": 20877, - "dammit": 20878, - "ying": 20879, - "collaborate": 20880, - "##chia": 20881, - "welterweight": 20882, - "33rd": 20883, - "pointer": 20884, - "substitution": 20885, - "bonded": 20886, - "umpire": 20887, - "communicating": 20888, - "multitude": 20889, - "paddle": 20890, - "##obe": 20891, - "federally": 20892, - "intimacy": 20893, - "##insky": 20894, - "betray": 20895, - "ssr": 20896, - "##lett": 20897, - "##lean": 20898, - "##lves": 20899, - "##therapy": 20900, - "airbus": 20901, - "##tery": 20902, - "functioned": 20903, - "ud": 20904, - "bearer": 20905, - "biomedical": 20906, - "netflix": 20907, - "##hire": 20908, - "##nca": 20909, - "condom": 20910, - "brink": 20911, - "ik": 20912, - "##nical": 20913, - "macy": 20914, - "##bet": 20915, - "flap": 20916, - "gma": 20917, - "experimented": 20918, - "jelly": 20919, - "lavender": 20920, - "##icles": 20921, - "##ulia": 20922, - "munro": 20923, - "##mian": 20924, - "##tial": 20925, - "rye": 20926, - "##rle": 20927, - "60th": 20928, - "gigs": 20929, - "hottest": 20930, - "rotated": 20931, - "predictions": 20932, - "fuji": 20933, - "bu": 20934, - "##erence": 20935, - "##omi": 20936, - "barangay": 20937, - "##fulness": 20938, - "##sas": 20939, - "clocks": 20940, - "##rwood": 20941, - 
"##liness": 20942, - "cereal": 20943, - "roe": 20944, - "wight": 20945, - "decker": 20946, - "uttered": 20947, - "babu": 20948, - "onion": 20949, - "xml": 20950, - "forcibly": 20951, - "##df": 20952, - "petra": 20953, - "sarcasm": 20954, - "hartley": 20955, - "peeled": 20956, - "storytelling": 20957, - "##42": 20958, - "##xley": 20959, - "##ysis": 20960, - "##ffa": 20961, - "fibre": 20962, - "kiel": 20963, - "auditor": 20964, - "fig": 20965, - "harald": 20966, - "greenville": 20967, - "##berries": 20968, - "geographically": 20969, - "nell": 20970, - "quartz": 20971, - "##athic": 20972, - "cemeteries": 20973, - "##lr": 20974, - "crossings": 20975, - "nah": 20976, - "holloway": 20977, - "reptiles": 20978, - "chun": 20979, - "sichuan": 20980, - "snowy": 20981, - "660": 20982, - "corrections": 20983, - "##ivo": 20984, - "zheng": 20985, - "ambassadors": 20986, - "blacksmith": 20987, - "fielded": 20988, - "fluids": 20989, - "hardcover": 20990, - "turnover": 20991, - "medications": 20992, - "melvin": 20993, - "academies": 20994, - "##erton": 20995, - "ro": 20996, - "roach": 20997, - "absorbing": 20998, - "spaniards": 20999, - "colton": 21000, - "##founded": 21001, - "outsider": 21002, - "espionage": 21003, - "kelsey": 21004, - "245": 21005, - "edible": 21006, - "##ulf": 21007, - "dora": 21008, - "establishes": 21009, - "##sham": 21010, - "##tries": 21011, - "contracting": 21012, - "##tania": 21013, - "cinematic": 21014, - "costello": 21015, - "nesting": 21016, - "##uron": 21017, - "connolly": 21018, - "duff": 21019, - "##nology": 21020, - "mma": 21021, - "##mata": 21022, - "fergus": 21023, - "sexes": 21024, - "gi": 21025, - "optics": 21026, - "spectator": 21027, - "woodstock": 21028, - "banning": 21029, - "##hee": 21030, - "##fle": 21031, - "differentiate": 21032, - "outfielder": 21033, - "refinery": 21034, - "226": 21035, - "312": 21036, - "gerhard": 21037, - "horde": 21038, - "lair": 21039, - "drastically": 21040, - "##udi": 21041, - "landfall": 21042, - "##cheng": 21043, - "motorsport": 21044, - "odi": 21045, - "##achi": 21046, - "predominant": 21047, - "quay": 21048, - "skins": 21049, - "##ental": 21050, - "edna": 21051, - "harshly": 21052, - "complementary": 21053, - "murdering": 21054, - "##aves": 21055, - "wreckage": 21056, - "##90": 21057, - "ono": 21058, - "outstretched": 21059, - "lennox": 21060, - "munitions": 21061, - "galen": 21062, - "reconcile": 21063, - "470": 21064, - "scalp": 21065, - "bicycles": 21066, - "gillespie": 21067, - "questionable": 21068, - "rosenberg": 21069, - "guillermo": 21070, - "hostel": 21071, - "jarvis": 21072, - "kabul": 21073, - "volvo": 21074, - "opium": 21075, - "yd": 21076, - "##twined": 21077, - "abuses": 21078, - "decca": 21079, - "outpost": 21080, - "##cino": 21081, - "sensible": 21082, - "neutrality": 21083, - "##64": 21084, - "ponce": 21085, - "anchorage": 21086, - "atkins": 21087, - "turrets": 21088, - "inadvertently": 21089, - "disagree": 21090, - "libre": 21091, - "vodka": 21092, - "reassuring": 21093, - "weighs": 21094, - "##yal": 21095, - "glide": 21096, - "jumper": 21097, - "ceilings": 21098, - "repertory": 21099, - "outs": 21100, - "stain": 21101, - "##bial": 21102, - "envy": 21103, - "##ucible": 21104, - "smashing": 21105, - "heightened": 21106, - "policing": 21107, - "hyun": 21108, - "mixes": 21109, - "lai": 21110, - "prima": 21111, - "##ples": 21112, - "celeste": 21113, - "##bina": 21114, - "lucrative": 21115, - "intervened": 21116, - "kc": 21117, - "manually": 21118, - "##rned": 21119, - "stature": 21120, - "staffed": 21121, - "bun": 
21122, - "bastards": 21123, - "nairobi": 21124, - "priced": 21125, - "##auer": 21126, - "thatcher": 21127, - "##kia": 21128, - "tripped": 21129, - "comune": 21130, - "##ogan": 21131, - "##pled": 21132, - "brasil": 21133, - "incentives": 21134, - "emanuel": 21135, - "hereford": 21136, - "musica": 21137, - "##kim": 21138, - "benedictine": 21139, - "biennale": 21140, - "##lani": 21141, - "eureka": 21142, - "gardiner": 21143, - "rb": 21144, - "knocks": 21145, - "sha": 21146, - "##ael": 21147, - "##elled": 21148, - "##onate": 21149, - "efficacy": 21150, - "ventura": 21151, - "masonic": 21152, - "sanford": 21153, - "maize": 21154, - "leverage": 21155, - "##feit": 21156, - "capacities": 21157, - "santana": 21158, - "##aur": 21159, - "novelty": 21160, - "vanilla": 21161, - "##cter": 21162, - "##tour": 21163, - "benin": 21164, - "##oir": 21165, - "##rain": 21166, - "neptune": 21167, - "drafting": 21168, - "tallinn": 21169, - "##cable": 21170, - "humiliation": 21171, - "##boarding": 21172, - "schleswig": 21173, - "fabian": 21174, - "bernardo": 21175, - "liturgy": 21176, - "spectacle": 21177, - "sweeney": 21178, - "pont": 21179, - "routledge": 21180, - "##tment": 21181, - "cosmos": 21182, - "ut": 21183, - "hilt": 21184, - "sleek": 21185, - "universally": 21186, - "##eville": 21187, - "##gawa": 21188, - "typed": 21189, - "##dry": 21190, - "favors": 21191, - "allegheny": 21192, - "glaciers": 21193, - "##rly": 21194, - "recalling": 21195, - "aziz": 21196, - "##log": 21197, - "parasite": 21198, - "requiem": 21199, - "auf": 21200, - "##berto": 21201, - "##llin": 21202, - "illumination": 21203, - "##breaker": 21204, - "##issa": 21205, - "festivities": 21206, - "bows": 21207, - "govern": 21208, - "vibe": 21209, - "vp": 21210, - "333": 21211, - "sprawled": 21212, - "larson": 21213, - "pilgrim": 21214, - "bwf": 21215, - "leaping": 21216, - "##rts": 21217, - "##ssel": 21218, - "alexei": 21219, - "greyhound": 21220, - "hoarse": 21221, - "##dler": 21222, - "##oration": 21223, - "seneca": 21224, - "##cule": 21225, - "gaping": 21226, - "##ulously": 21227, - "##pura": 21228, - "cinnamon": 21229, - "##gens": 21230, - "##rricular": 21231, - "craven": 21232, - "fantasies": 21233, - "houghton": 21234, - "engined": 21235, - "reigned": 21236, - "dictator": 21237, - "supervising": 21238, - "##oris": 21239, - "bogota": 21240, - "commentaries": 21241, - "unnatural": 21242, - "fingernails": 21243, - "spirituality": 21244, - "tighten": 21245, - "##tm": 21246, - "canadiens": 21247, - "protesting": 21248, - "intentional": 21249, - "cheers": 21250, - "sparta": 21251, - "##ytic": 21252, - "##iere": 21253, - "##zine": 21254, - "widen": 21255, - "belgarath": 21256, - "controllers": 21257, - "dodd": 21258, - "iaaf": 21259, - "navarre": 21260, - "##ication": 21261, - "defect": 21262, - "squire": 21263, - "steiner": 21264, - "whisky": 21265, - "##mins": 21266, - "560": 21267, - "inevitably": 21268, - "tome": 21269, - "##gold": 21270, - "chew": 21271, - "##uid": 21272, - "##lid": 21273, - "elastic": 21274, - "##aby": 21275, - "streaked": 21276, - "alliances": 21277, - "jailed": 21278, - "regal": 21279, - "##ined": 21280, - "##phy": 21281, - "czechoslovak": 21282, - "narration": 21283, - "absently": 21284, - "##uld": 21285, - "bluegrass": 21286, - "guangdong": 21287, - "quran": 21288, - "criticizing": 21289, - "hose": 21290, - "hari": 21291, - "##liest": 21292, - "##owa": 21293, - "skier": 21294, - "streaks": 21295, - "deploy": 21296, - "##lom": 21297, - "raft": 21298, - "bose": 21299, - "dialed": 21300, - "huff": 21301, - "##eira": 
21302, - "haifa": 21303, - "simplest": 21304, - "bursting": 21305, - "endings": 21306, - "ib": 21307, - "sultanate": 21308, - "##titled": 21309, - "franks": 21310, - "whitman": 21311, - "ensures": 21312, - "sven": 21313, - "##ggs": 21314, - "collaborators": 21315, - "forster": 21316, - "organising": 21317, - "ui": 21318, - "banished": 21319, - "napier": 21320, - "injustice": 21321, - "teller": 21322, - "layered": 21323, - "thump": 21324, - "##otti": 21325, - "roc": 21326, - "battleships": 21327, - "evidenced": 21328, - "fugitive": 21329, - "sadie": 21330, - "robotics": 21331, - "##roud": 21332, - "equatorial": 21333, - "geologist": 21334, - "##iza": 21335, - "yielding": 21336, - "##bron": 21337, - "##sr": 21338, - "internationale": 21339, - "mecca": 21340, - "##diment": 21341, - "sbs": 21342, - "skyline": 21343, - "toad": 21344, - "uploaded": 21345, - "reflective": 21346, - "undrafted": 21347, - "lal": 21348, - "leafs": 21349, - "bayern": 21350, - "##dai": 21351, - "lakshmi": 21352, - "shortlisted": 21353, - "##stick": 21354, - "##wicz": 21355, - "camouflage": 21356, - "donate": 21357, - "af": 21358, - "christi": 21359, - "lau": 21360, - "##acio": 21361, - "disclosed": 21362, - "nemesis": 21363, - "1761": 21364, - "assemble": 21365, - "straining": 21366, - "northamptonshire": 21367, - "tal": 21368, - "##asi": 21369, - "bernardino": 21370, - "premature": 21371, - "heidi": 21372, - "42nd": 21373, - "coefficients": 21374, - "galactic": 21375, - "reproduce": 21376, - "buzzed": 21377, - "sensations": 21378, - "zionist": 21379, - "monsieur": 21380, - "myrtle": 21381, - "##eme": 21382, - "archery": 21383, - "strangled": 21384, - "musically": 21385, - "viewpoint": 21386, - "antiquities": 21387, - "bei": 21388, - "trailers": 21389, - "seahawks": 21390, - "cured": 21391, - "pee": 21392, - "preferring": 21393, - "tasmanian": 21394, - "lange": 21395, - "sul": 21396, - "##mail": 21397, - "##working": 21398, - "colder": 21399, - "overland": 21400, - "lucivar": 21401, - "massey": 21402, - "gatherings": 21403, - "haitian": 21404, - "##smith": 21405, - "disapproval": 21406, - "flaws": 21407, - "##cco": 21408, - "##enbach": 21409, - "1766": 21410, - "npr": 21411, - "##icular": 21412, - "boroughs": 21413, - "creole": 21414, - "forums": 21415, - "techno": 21416, - "1755": 21417, - "dent": 21418, - "abdominal": 21419, - "streetcar": 21420, - "##eson": 21421, - "##stream": 21422, - "procurement": 21423, - "gemini": 21424, - "predictable": 21425, - "##tya": 21426, - "acheron": 21427, - "christoph": 21428, - "feeder": 21429, - "fronts": 21430, - "vendor": 21431, - "bernhard": 21432, - "jammu": 21433, - "tumors": 21434, - "slang": 21435, - "##uber": 21436, - "goaltender": 21437, - "twists": 21438, - "curving": 21439, - "manson": 21440, - "vuelta": 21441, - "mer": 21442, - "peanut": 21443, - "confessions": 21444, - "pouch": 21445, - "unpredictable": 21446, - "allowance": 21447, - "theodor": 21448, - "vascular": 21449, - "##factory": 21450, - "bala": 21451, - "authenticity": 21452, - "metabolic": 21453, - "coughing": 21454, - "nanjing": 21455, - "##cea": 21456, - "pembroke": 21457, - "##bard": 21458, - "splendid": 21459, - "36th": 21460, - "ff": 21461, - "hourly": 21462, - "##ahu": 21463, - "elmer": 21464, - "handel": 21465, - "##ivate": 21466, - "awarding": 21467, - "thrusting": 21468, - "dl": 21469, - "experimentation": 21470, - "##hesion": 21471, - "##46": 21472, - "caressed": 21473, - "entertained": 21474, - "steak": 21475, - "##rangle": 21476, - "biologist": 21477, - "orphans": 21478, - "baroness": 21479, - 
"oyster": 21480, - "stepfather": 21481, - "##dridge": 21482, - "mirage": 21483, - "reefs": 21484, - "speeding": 21485, - "##31": 21486, - "barons": 21487, - "1764": 21488, - "227": 21489, - "inhabit": 21490, - "preached": 21491, - "repealed": 21492, - "##tral": 21493, - "honoring": 21494, - "boogie": 21495, - "captives": 21496, - "administer": 21497, - "johanna": 21498, - "##imate": 21499, - "gel": 21500, - "suspiciously": 21501, - "1767": 21502, - "sobs": 21503, - "##dington": 21504, - "backbone": 21505, - "hayward": 21506, - "garry": 21507, - "##folding": 21508, - "##nesia": 21509, - "maxi": 21510, - "##oof": 21511, - "##ppe": 21512, - "ellison": 21513, - "galileo": 21514, - "##stand": 21515, - "crimea": 21516, - "frenzy": 21517, - "amour": 21518, - "bumper": 21519, - "matrices": 21520, - "natalia": 21521, - "baking": 21522, - "garth": 21523, - "palestinians": 21524, - "##grove": 21525, - "smack": 21526, - "conveyed": 21527, - "ensembles": 21528, - "gardening": 21529, - "##manship": 21530, - "##rup": 21531, - "##stituting": 21532, - "1640": 21533, - "harvesting": 21534, - "topography": 21535, - "jing": 21536, - "shifters": 21537, - "dormitory": 21538, - "##carriage": 21539, - "##lston": 21540, - "ist": 21541, - "skulls": 21542, - "##stadt": 21543, - "dolores": 21544, - "jewellery": 21545, - "sarawak": 21546, - "##wai": 21547, - "##zier": 21548, - "fences": 21549, - "christy": 21550, - "confinement": 21551, - "tumbling": 21552, - "credibility": 21553, - "fir": 21554, - "stench": 21555, - "##bria": 21556, - "##plication": 21557, - "##nged": 21558, - "##sam": 21559, - "virtues": 21560, - "##belt": 21561, - "marjorie": 21562, - "pba": 21563, - "##eem": 21564, - "##made": 21565, - "celebrates": 21566, - "schooner": 21567, - "agitated": 21568, - "barley": 21569, - "fulfilling": 21570, - "anthropologist": 21571, - "##pro": 21572, - "restrict": 21573, - "novi": 21574, - "regulating": 21575, - "##nent": 21576, - "padres": 21577, - "##rani": 21578, - "##hesive": 21579, - "loyola": 21580, - "tabitha": 21581, - "milky": 21582, - "olson": 21583, - "proprietor": 21584, - "crambidae": 21585, - "guarantees": 21586, - "intercollegiate": 21587, - "ljubljana": 21588, - "hilda": 21589, - "##sko": 21590, - "ignorant": 21591, - "hooded": 21592, - "##lts": 21593, - "sardinia": 21594, - "##lidae": 21595, - "##vation": 21596, - "frontman": 21597, - "privileged": 21598, - "witchcraft": 21599, - "##gp": 21600, - "jammed": 21601, - "laude": 21602, - "poking": 21603, - "##than": 21604, - "bracket": 21605, - "amazement": 21606, - "yunnan": 21607, - "##erus": 21608, - "maharaja": 21609, - "linnaeus": 21610, - "264": 21611, - "commissioning": 21612, - "milano": 21613, - "peacefully": 21614, - "##logies": 21615, - "akira": 21616, - "rani": 21617, - "regulator": 21618, - "##36": 21619, - "grasses": 21620, - "##rance": 21621, - "luzon": 21622, - "crows": 21623, - "compiler": 21624, - "gretchen": 21625, - "seaman": 21626, - "edouard": 21627, - "tab": 21628, - "buccaneers": 21629, - "ellington": 21630, - "hamlets": 21631, - "whig": 21632, - "socialists": 21633, - "##anto": 21634, - "directorial": 21635, - "easton": 21636, - "mythological": 21637, - "##kr": 21638, - "##vary": 21639, - "rhineland": 21640, - "semantic": 21641, - "taut": 21642, - "dune": 21643, - "inventions": 21644, - "succeeds": 21645, - "##iter": 21646, - "replication": 21647, - "branched": 21648, - "##pired": 21649, - "jul": 21650, - "prosecuted": 21651, - "kangaroo": 21652, - "penetrated": 21653, - "##avian": 21654, - "middlesbrough": 21655, - "doses": 
21656, - "bleak": 21657, - "madam": 21658, - "predatory": 21659, - "relentless": 21660, - "##vili": 21661, - "reluctance": 21662, - "##vir": 21663, - "hailey": 21664, - "crore": 21665, - "silvery": 21666, - "1759": 21667, - "monstrous": 21668, - "swimmers": 21669, - "transmissions": 21670, - "hawthorn": 21671, - "informing": 21672, - "##eral": 21673, - "toilets": 21674, - "caracas": 21675, - "crouch": 21676, - "kb": 21677, - "##sett": 21678, - "295": 21679, - "cartel": 21680, - "hadley": 21681, - "##aling": 21682, - "alexia": 21683, - "yvonne": 21684, - "##biology": 21685, - "cinderella": 21686, - "eton": 21687, - "superb": 21688, - "blizzard": 21689, - "stabbing": 21690, - "industrialist": 21691, - "maximus": 21692, - "##gm": 21693, - "##orus": 21694, - "groves": 21695, - "maud": 21696, - "clade": 21697, - "oversized": 21698, - "comedic": 21699, - "##bella": 21700, - "rosen": 21701, - "nomadic": 21702, - "fulham": 21703, - "montane": 21704, - "beverages": 21705, - "galaxies": 21706, - "redundant": 21707, - "swarm": 21708, - "##rot": 21709, - "##folia": 21710, - "##llis": 21711, - "buckinghamshire": 21712, - "fen": 21713, - "bearings": 21714, - "bahadur": 21715, - "##rom": 21716, - "gilles": 21717, - "phased": 21718, - "dynamite": 21719, - "faber": 21720, - "benoit": 21721, - "vip": 21722, - "##ount": 21723, - "##wd": 21724, - "booking": 21725, - "fractured": 21726, - "tailored": 21727, - "anya": 21728, - "spices": 21729, - "westwood": 21730, - "cairns": 21731, - "auditions": 21732, - "inflammation": 21733, - "steamed": 21734, - "##rocity": 21735, - "##acion": 21736, - "##urne": 21737, - "skyla": 21738, - "thereof": 21739, - "watford": 21740, - "torment": 21741, - "archdeacon": 21742, - "transforms": 21743, - "lulu": 21744, - "demeanor": 21745, - "fucked": 21746, - "serge": 21747, - "##sor": 21748, - "mckenna": 21749, - "minas": 21750, - "entertainer": 21751, - "##icide": 21752, - "caress": 21753, - "originate": 21754, - "residue": 21755, - "##sty": 21756, - "1740": 21757, - "##ilised": 21758, - "##org": 21759, - "beech": 21760, - "##wana": 21761, - "subsidies": 21762, - "##ghton": 21763, - "emptied": 21764, - "gladstone": 21765, - "ru": 21766, - "firefighters": 21767, - "voodoo": 21768, - "##rcle": 21769, - "het": 21770, - "nightingale": 21771, - "tamara": 21772, - "edmond": 21773, - "ingredient": 21774, - "weaknesses": 21775, - "silhouette": 21776, - "285": 21777, - "compatibility": 21778, - "withdrawing": 21779, - "hampson": 21780, - "##mona": 21781, - "anguish": 21782, - "giggling": 21783, - "##mber": 21784, - "bookstore": 21785, - "##jiang": 21786, - "southernmost": 21787, - "tilting": 21788, - "##vance": 21789, - "bai": 21790, - "economical": 21791, - "rf": 21792, - "briefcase": 21793, - "dreadful": 21794, - "hinted": 21795, - "projections": 21796, - "shattering": 21797, - "totaling": 21798, - "##rogate": 21799, - "analogue": 21800, - "indicted": 21801, - "periodical": 21802, - "fullback": 21803, - "##dman": 21804, - "haynes": 21805, - "##tenberg": 21806, - "##ffs": 21807, - "##ishment": 21808, - "1745": 21809, - "thirst": 21810, - "stumble": 21811, - "penang": 21812, - "vigorous": 21813, - "##ddling": 21814, - "##kor": 21815, - "##lium": 21816, - "octave": 21817, - "##ove": 21818, - "##enstein": 21819, - "##inen": 21820, - "##ones": 21821, - "siberian": 21822, - "##uti": 21823, - "cbn": 21824, - "repeal": 21825, - "swaying": 21826, - "##vington": 21827, - "khalid": 21828, - "tanaka": 21829, - "unicorn": 21830, - "otago": 21831, - "plastered": 21832, - "lobe": 21833, - "riddle": 
21834, - "##rella": 21835, - "perch": 21836, - "##ishing": 21837, - "croydon": 21838, - "filtered": 21839, - "graeme": 21840, - "tripoli": 21841, - "##ossa": 21842, - "crocodile": 21843, - "##chers": 21844, - "sufi": 21845, - "mined": 21846, - "##tung": 21847, - "inferno": 21848, - "lsu": 21849, - "##phi": 21850, - "swelled": 21851, - "utilizes": 21852, - "£2": 21853, - "cale": 21854, - "periodicals": 21855, - "styx": 21856, - "hike": 21857, - "informally": 21858, - "coop": 21859, - "lund": 21860, - "##tidae": 21861, - "ala": 21862, - "hen": 21863, - "qui": 21864, - "transformations": 21865, - "disposed": 21866, - "sheath": 21867, - "chickens": 21868, - "##cade": 21869, - "fitzroy": 21870, - "sas": 21871, - "silesia": 21872, - "unacceptable": 21873, - "odisha": 21874, - "1650": 21875, - "sabrina": 21876, - "pe": 21877, - "spokane": 21878, - "ratios": 21879, - "athena": 21880, - "massage": 21881, - "shen": 21882, - "dilemma": 21883, - "##drum": 21884, - "##riz": 21885, - "##hul": 21886, - "corona": 21887, - "doubtful": 21888, - "niall": 21889, - "##pha": 21890, - "##bino": 21891, - "fines": 21892, - "cite": 21893, - "acknowledging": 21894, - "bangor": 21895, - "ballard": 21896, - "bathurst": 21897, - "##resh": 21898, - "huron": 21899, - "mustered": 21900, - "alzheimer": 21901, - "garments": 21902, - "kinase": 21903, - "tyre": 21904, - "warship": 21905, - "##cp": 21906, - "flashback": 21907, - "pulmonary": 21908, - "braun": 21909, - "cheat": 21910, - "kamal": 21911, - "cyclists": 21912, - "constructions": 21913, - "grenades": 21914, - "ndp": 21915, - "traveller": 21916, - "excuses": 21917, - "stomped": 21918, - "signalling": 21919, - "trimmed": 21920, - "futsal": 21921, - "mosques": 21922, - "relevance": 21923, - "##wine": 21924, - "wta": 21925, - "##23": 21926, - "##vah": 21927, - "##lter": 21928, - "hoc": 21929, - "##riding": 21930, - "optimistic": 21931, - "##´s": 21932, - "deco": 21933, - "sim": 21934, - "interacting": 21935, - "rejecting": 21936, - "moniker": 21937, - "waterways": 21938, - "##ieri": 21939, - "##oku": 21940, - "mayors": 21941, - "gdansk": 21942, - "outnumbered": 21943, - "pearls": 21944, - "##ended": 21945, - "##hampton": 21946, - "fairs": 21947, - "totals": 21948, - "dominating": 21949, - "262": 21950, - "notions": 21951, - "stairway": 21952, - "compiling": 21953, - "pursed": 21954, - "commodities": 21955, - "grease": 21956, - "yeast": 21957, - "##jong": 21958, - "carthage": 21959, - "griffiths": 21960, - "residual": 21961, - "amc": 21962, - "contraction": 21963, - "laird": 21964, - "sapphire": 21965, - "##marine": 21966, - "##ivated": 21967, - "amalgamation": 21968, - "dissolve": 21969, - "inclination": 21970, - "lyle": 21971, - "packaged": 21972, - "altitudes": 21973, - "suez": 21974, - "canons": 21975, - "graded": 21976, - "lurched": 21977, - "narrowing": 21978, - "boasts": 21979, - "guise": 21980, - "wed": 21981, - "enrico": 21982, - "##ovsky": 21983, - "rower": 21984, - "scarred": 21985, - "bree": 21986, - "cub": 21987, - "iberian": 21988, - "protagonists": 21989, - "bargaining": 21990, - "proposing": 21991, - "trainers": 21992, - "voyages": 21993, - "vans": 21994, - "fishes": 21995, - "##aea": 21996, - "##ivist": 21997, - "##verance": 21998, - "encryption": 21999, - "artworks": 22000, - "kazan": 22001, - "sabre": 22002, - "cleopatra": 22003, - "hepburn": 22004, - "rotting": 22005, - "supremacy": 22006, - "mecklenburg": 22007, - "##brate": 22008, - "burrows": 22009, - "hazards": 22010, - "outgoing": 22011, - "flair": 22012, - "organizes": 22013, - "##ctions": 
22014, - "scorpion": 22015, - "##usions": 22016, - "boo": 22017, - "234": 22018, - "chevalier": 22019, - "dunedin": 22020, - "slapping": 22021, - "##34": 22022, - "ineligible": 22023, - "pensions": 22024, - "##38": 22025, - "##omic": 22026, - "manufactures": 22027, - "emails": 22028, - "bismarck": 22029, - "238": 22030, - "weakening": 22031, - "blackish": 22032, - "ding": 22033, - "mcgee": 22034, - "quo": 22035, - "##rling": 22036, - "northernmost": 22037, - "xx": 22038, - "manpower": 22039, - "greed": 22040, - "sampson": 22041, - "clicking": 22042, - "##ange": 22043, - "##horpe": 22044, - "##inations": 22045, - "##roving": 22046, - "torre": 22047, - "##eptive": 22048, - "##moral": 22049, - "symbolism": 22050, - "38th": 22051, - "asshole": 22052, - "meritorious": 22053, - "outfits": 22054, - "splashed": 22055, - "biographies": 22056, - "sprung": 22057, - "astros": 22058, - "##tale": 22059, - "302": 22060, - "737": 22061, - "filly": 22062, - "raoul": 22063, - "nw": 22064, - "tokugawa": 22065, - "linden": 22066, - "clubhouse": 22067, - "##apa": 22068, - "tracts": 22069, - "romano": 22070, - "##pio": 22071, - "putin": 22072, - "tags": 22073, - "##note": 22074, - "chained": 22075, - "dickson": 22076, - "gunshot": 22077, - "moe": 22078, - "gunn": 22079, - "rashid": 22080, - "##tails": 22081, - "zipper": 22082, - "##bas": 22083, - "##nea": 22084, - "contrasted": 22085, - "##ply": 22086, - "##udes": 22087, - "plum": 22088, - "pharaoh": 22089, - "##pile": 22090, - "aw": 22091, - "comedies": 22092, - "ingrid": 22093, - "sandwiches": 22094, - "subdivisions": 22095, - "1100": 22096, - "mariana": 22097, - "nokia": 22098, - "kamen": 22099, - "hz": 22100, - "delaney": 22101, - "veto": 22102, - "herring": 22103, - "##words": 22104, - "possessive": 22105, - "outlines": 22106, - "##roup": 22107, - "siemens": 22108, - "stairwell": 22109, - "rc": 22110, - "gallantry": 22111, - "messiah": 22112, - "palais": 22113, - "yells": 22114, - "233": 22115, - "zeppelin": 22116, - "##dm": 22117, - "bolivar": 22118, - "##cede": 22119, - "smackdown": 22120, - "mckinley": 22121, - "##mora": 22122, - "##yt": 22123, - "muted": 22124, - "geologic": 22125, - "finely": 22126, - "unitary": 22127, - "avatar": 22128, - "hamas": 22129, - "maynard": 22130, - "rees": 22131, - "bog": 22132, - "contrasting": 22133, - "##rut": 22134, - "liv": 22135, - "chico": 22136, - "disposition": 22137, - "pixel": 22138, - "##erate": 22139, - "becca": 22140, - "dmitry": 22141, - "yeshiva": 22142, - "narratives": 22143, - "##lva": 22144, - "##ulton": 22145, - "mercenary": 22146, - "sharpe": 22147, - "tempered": 22148, - "navigate": 22149, - "stealth": 22150, - "amassed": 22151, - "keynes": 22152, - "##lini": 22153, - "untouched": 22154, - "##rrie": 22155, - "havoc": 22156, - "lithium": 22157, - "##fighting": 22158, - "abyss": 22159, - "graf": 22160, - "southward": 22161, - "wolverine": 22162, - "balloons": 22163, - "implements": 22164, - "ngos": 22165, - "transitions": 22166, - "##icum": 22167, - "ambushed": 22168, - "concacaf": 22169, - "dormant": 22170, - "economists": 22171, - "##dim": 22172, - "costing": 22173, - "csi": 22174, - "rana": 22175, - "universite": 22176, - "boulders": 22177, - "verity": 22178, - "##llon": 22179, - "collin": 22180, - "mellon": 22181, - "misses": 22182, - "cypress": 22183, - "fluorescent": 22184, - "lifeless": 22185, - "spence": 22186, - "##ulla": 22187, - "crewe": 22188, - "shepard": 22189, - "pak": 22190, - "revelations": 22191, - "##م": 22192, - "jolly": 22193, - "gibbons": 22194, - "paw": 22195, - "##dro": 22196, - 
"##quel": 22197, - "freeing": 22198, - "##test": 22199, - "shack": 22200, - "fries": 22201, - "palatine": 22202, - "##51": 22203, - "##hiko": 22204, - "accompaniment": 22205, - "cruising": 22206, - "recycled": 22207, - "##aver": 22208, - "erwin": 22209, - "sorting": 22210, - "synthesizers": 22211, - "dyke": 22212, - "realities": 22213, - "sg": 22214, - "strides": 22215, - "enslaved": 22216, - "wetland": 22217, - "##ghan": 22218, - "competence": 22219, - "gunpowder": 22220, - "grassy": 22221, - "maroon": 22222, - "reactors": 22223, - "objection": 22224, - "##oms": 22225, - "carlson": 22226, - "gearbox": 22227, - "macintosh": 22228, - "radios": 22229, - "shelton": 22230, - "##sho": 22231, - "clergyman": 22232, - "prakash": 22233, - "254": 22234, - "mongols": 22235, - "trophies": 22236, - "oricon": 22237, - "228": 22238, - "stimuli": 22239, - "twenty20": 22240, - "cantonese": 22241, - "cortes": 22242, - "mirrored": 22243, - "##saurus": 22244, - "bhp": 22245, - "cristina": 22246, - "melancholy": 22247, - "##lating": 22248, - "enjoyable": 22249, - "nuevo": 22250, - "##wny": 22251, - "downfall": 22252, - "schumacher": 22253, - "##ind": 22254, - "banging": 22255, - "lausanne": 22256, - "rumbled": 22257, - "paramilitary": 22258, - "reflex": 22259, - "ax": 22260, - "amplitude": 22261, - "migratory": 22262, - "##gall": 22263, - "##ups": 22264, - "midi": 22265, - "barnard": 22266, - "lastly": 22267, - "sherry": 22268, - "##hp": 22269, - "##nall": 22270, - "keystone": 22271, - "##kra": 22272, - "carleton": 22273, - "slippery": 22274, - "##53": 22275, - "coloring": 22276, - "foe": 22277, - "socket": 22278, - "otter": 22279, - "##rgos": 22280, - "mats": 22281, - "##tose": 22282, - "consultants": 22283, - "bafta": 22284, - "bison": 22285, - "topping": 22286, - "##km": 22287, - "490": 22288, - "primal": 22289, - "abandonment": 22290, - "transplant": 22291, - "atoll": 22292, - "hideous": 22293, - "mort": 22294, - "pained": 22295, - "reproduced": 22296, - "tae": 22297, - "howling": 22298, - "##turn": 22299, - "unlawful": 22300, - "billionaire": 22301, - "hotter": 22302, - "poised": 22303, - "lansing": 22304, - "##chang": 22305, - "dinamo": 22306, - "retro": 22307, - "messing": 22308, - "nfc": 22309, - "domesday": 22310, - "##mina": 22311, - "blitz": 22312, - "timed": 22313, - "##athing": 22314, - "##kley": 22315, - "ascending": 22316, - "gesturing": 22317, - "##izations": 22318, - "signaled": 22319, - "tis": 22320, - "chinatown": 22321, - "mermaid": 22322, - "savanna": 22323, - "jameson": 22324, - "##aint": 22325, - "catalina": 22326, - "##pet": 22327, - "##hers": 22328, - "cochrane": 22329, - "cy": 22330, - "chatting": 22331, - "##kus": 22332, - "alerted": 22333, - "computation": 22334, - "mused": 22335, - "noelle": 22336, - "majestic": 22337, - "mohawk": 22338, - "campo": 22339, - "octagonal": 22340, - "##sant": 22341, - "##hend": 22342, - "241": 22343, - "aspiring": 22344, - "##mart": 22345, - "comprehend": 22346, - "iona": 22347, - "paralyzed": 22348, - "shimmering": 22349, - "swindon": 22350, - "rhone": 22351, - "##eley": 22352, - "reputed": 22353, - "configurations": 22354, - "pitchfork": 22355, - "agitation": 22356, - "francais": 22357, - "gillian": 22358, - "lipstick": 22359, - "##ilo": 22360, - "outsiders": 22361, - "pontifical": 22362, - "resisting": 22363, - "bitterness": 22364, - "sewer": 22365, - "rockies": 22366, - "##edd": 22367, - "##ucher": 22368, - "misleading": 22369, - "1756": 22370, - "exiting": 22371, - "galloway": 22372, - "##nging": 22373, - "risked": 22374, - "##heart": 22375, - 
"246": 22376, - "commemoration": 22377, - "schultz": 22378, - "##rka": 22379, - "integrating": 22380, - "##rsa": 22381, - "poses": 22382, - "shrieked": 22383, - "##weiler": 22384, - "guineas": 22385, - "gladys": 22386, - "jerking": 22387, - "owls": 22388, - "goldsmith": 22389, - "nightly": 22390, - "penetrating": 22391, - "##unced": 22392, - "lia": 22393, - "##33": 22394, - "ignited": 22395, - "betsy": 22396, - "##aring": 22397, - "##thorpe": 22398, - "follower": 22399, - "vigorously": 22400, - "##rave": 22401, - "coded": 22402, - "kiran": 22403, - "knit": 22404, - "zoology": 22405, - "tbilisi": 22406, - "##28": 22407, - "##bered": 22408, - "repository": 22409, - "govt": 22410, - "deciduous": 22411, - "dino": 22412, - "growling": 22413, - "##bba": 22414, - "enhancement": 22415, - "unleashed": 22416, - "chanting": 22417, - "pussy": 22418, - "biochemistry": 22419, - "##eric": 22420, - "kettle": 22421, - "repression": 22422, - "toxicity": 22423, - "nrhp": 22424, - "##arth": 22425, - "##kko": 22426, - "##bush": 22427, - "ernesto": 22428, - "commended": 22429, - "outspoken": 22430, - "242": 22431, - "mca": 22432, - "parchment": 22433, - "sms": 22434, - "kristen": 22435, - "##aton": 22436, - "bisexual": 22437, - "raked": 22438, - "glamour": 22439, - "navajo": 22440, - "a2": 22441, - "conditioned": 22442, - "showcased": 22443, - "##hma": 22444, - "spacious": 22445, - "youthful": 22446, - "##esa": 22447, - "usl": 22448, - "appliances": 22449, - "junta": 22450, - "brest": 22451, - "layne": 22452, - "conglomerate": 22453, - "enchanted": 22454, - "chao": 22455, - "loosened": 22456, - "picasso": 22457, - "circulating": 22458, - "inspect": 22459, - "montevideo": 22460, - "##centric": 22461, - "##kti": 22462, - "piazza": 22463, - "spurred": 22464, - "##aith": 22465, - "bari": 22466, - "freedoms": 22467, - "poultry": 22468, - "stamford": 22469, - "lieu": 22470, - "##ect": 22471, - "indigo": 22472, - "sarcastic": 22473, - "bahia": 22474, - "stump": 22475, - "attach": 22476, - "dvds": 22477, - "frankenstein": 22478, - "lille": 22479, - "approx": 22480, - "scriptures": 22481, - "pollen": 22482, - "##script": 22483, - "nmi": 22484, - "overseen": 22485, - "##ivism": 22486, - "tides": 22487, - "proponent": 22488, - "newmarket": 22489, - "inherit": 22490, - "milling": 22491, - "##erland": 22492, - "centralized": 22493, - "##rou": 22494, - "distributors": 22495, - "credentials": 22496, - "drawers": 22497, - "abbreviation": 22498, - "##lco": 22499, - "##xon": 22500, - "downing": 22501, - "uncomfortably": 22502, - "ripe": 22503, - "##oes": 22504, - "erase": 22505, - "franchises": 22506, - "##ever": 22507, - "populace": 22508, - "##bery": 22509, - "##khar": 22510, - "decomposition": 22511, - "pleas": 22512, - "##tet": 22513, - "daryl": 22514, - "sabah": 22515, - "##stle": 22516, - "##wide": 22517, - "fearless": 22518, - "genie": 22519, - "lesions": 22520, - "annette": 22521, - "##ogist": 22522, - "oboe": 22523, - "appendix": 22524, - "nair": 22525, - "dripped": 22526, - "petitioned": 22527, - "maclean": 22528, - "mosquito": 22529, - "parrot": 22530, - "rpg": 22531, - "hampered": 22532, - "1648": 22533, - "operatic": 22534, - "reservoirs": 22535, - "##tham": 22536, - "irrelevant": 22537, - "jolt": 22538, - "summarized": 22539, - "##fp": 22540, - "medallion": 22541, - "##taff": 22542, - "##−": 22543, - "clawed": 22544, - "harlow": 22545, - "narrower": 22546, - "goddard": 22547, - "marcia": 22548, - "bodied": 22549, - "fremont": 22550, - "suarez": 22551, - "altering": 22552, - "tempest": 22553, - "mussolini": 22554, - 
"porn": 22555, - "##isms": 22556, - "sweetly": 22557, - "oversees": 22558, - "walkers": 22559, - "solitude": 22560, - "grimly": 22561, - "shrines": 22562, - "hk": 22563, - "ich": 22564, - "supervisors": 22565, - "hostess": 22566, - "dietrich": 22567, - "legitimacy": 22568, - "brushes": 22569, - "expressive": 22570, - "##yp": 22571, - "dissipated": 22572, - "##rse": 22573, - "localized": 22574, - "systemic": 22575, - "##nikov": 22576, - "gettysburg": 22577, - "##js": 22578, - "##uaries": 22579, - "dialogues": 22580, - "muttering": 22581, - "251": 22582, - "housekeeper": 22583, - "sicilian": 22584, - "discouraged": 22585, - "##frey": 22586, - "beamed": 22587, - "kaladin": 22588, - "halftime": 22589, - "kidnap": 22590, - "##amo": 22591, - "##llet": 22592, - "1754": 22593, - "synonymous": 22594, - "depleted": 22595, - "instituto": 22596, - "insulin": 22597, - "reprised": 22598, - "##opsis": 22599, - "clashed": 22600, - "##ctric": 22601, - "interrupting": 22602, - "radcliffe": 22603, - "insisting": 22604, - "medici": 22605, - "1715": 22606, - "ejected": 22607, - "playfully": 22608, - "turbulent": 22609, - "##47": 22610, - "starvation": 22611, - "##rini": 22612, - "shipment": 22613, - "rebellious": 22614, - "petersen": 22615, - "verification": 22616, - "merits": 22617, - "##rified": 22618, - "cakes": 22619, - "##charged": 22620, - "1757": 22621, - "milford": 22622, - "shortages": 22623, - "spying": 22624, - "fidelity": 22625, - "##aker": 22626, - "emitted": 22627, - "storylines": 22628, - "harvested": 22629, - "seismic": 22630, - "##iform": 22631, - "cheung": 22632, - "kilda": 22633, - "theoretically": 22634, - "barbie": 22635, - "lynx": 22636, - "##rgy": 22637, - "##tius": 22638, - "goblin": 22639, - "mata": 22640, - "poisonous": 22641, - "##nburg": 22642, - "reactive": 22643, - "residues": 22644, - "obedience": 22645, - "##евич": 22646, - "conjecture": 22647, - "##rac": 22648, - "401": 22649, - "hating": 22650, - "sixties": 22651, - "kicker": 22652, - "moaning": 22653, - "motown": 22654, - "##bha": 22655, - "emancipation": 22656, - "neoclassical": 22657, - "##hering": 22658, - "consoles": 22659, - "ebert": 22660, - "professorship": 22661, - "##tures": 22662, - "sustaining": 22663, - "assaults": 22664, - "obeyed": 22665, - "affluent": 22666, - "incurred": 22667, - "tornadoes": 22668, - "##eber": 22669, - "##zow": 22670, - "emphasizing": 22671, - "highlanders": 22672, - "cheated": 22673, - "helmets": 22674, - "##ctus": 22675, - "internship": 22676, - "terence": 22677, - "bony": 22678, - "executions": 22679, - "legislators": 22680, - "berries": 22681, - "peninsular": 22682, - "tinged": 22683, - "##aco": 22684, - "1689": 22685, - "amplifier": 22686, - "corvette": 22687, - "ribbons": 22688, - "lavish": 22689, - "pennant": 22690, - "##lander": 22691, - "worthless": 22692, - "##chfield": 22693, - "##forms": 22694, - "mariano": 22695, - "pyrenees": 22696, - "expenditures": 22697, - "##icides": 22698, - "chesterfield": 22699, - "mandir": 22700, - "tailor": 22701, - "39th": 22702, - "sergey": 22703, - "nestled": 22704, - "willed": 22705, - "aristocracy": 22706, - "devotees": 22707, - "goodnight": 22708, - "raaf": 22709, - "rumored": 22710, - "weaponry": 22711, - "remy": 22712, - "appropriations": 22713, - "harcourt": 22714, - "burr": 22715, - "riaa": 22716, - "##lence": 22717, - "limitation": 22718, - "unnoticed": 22719, - "guo": 22720, - "soaking": 22721, - "swamps": 22722, - "##tica": 22723, - "collapsing": 22724, - "tatiana": 22725, - "descriptive": 22726, - "brigham": 22727, - "psalm": 22728, - 
"##chment": 22729, - "maddox": 22730, - "##lization": 22731, - "patti": 22732, - "caliph": 22733, - "##aja": 22734, - "akron": 22735, - "injuring": 22736, - "serra": 22737, - "##ganj": 22738, - "basins": 22739, - "##sari": 22740, - "astonished": 22741, - "launcher": 22742, - "##church": 22743, - "hilary": 22744, - "wilkins": 22745, - "sewing": 22746, - "##sf": 22747, - "stinging": 22748, - "##fia": 22749, - "##ncia": 22750, - "underwood": 22751, - "startup": 22752, - "##ition": 22753, - "compilations": 22754, - "vibrations": 22755, - "embankment": 22756, - "jurist": 22757, - "##nity": 22758, - "bard": 22759, - "juventus": 22760, - "groundwater": 22761, - "kern": 22762, - "palaces": 22763, - "helium": 22764, - "boca": 22765, - "cramped": 22766, - "marissa": 22767, - "soto": 22768, - "##worm": 22769, - "jae": 22770, - "princely": 22771, - "##ggy": 22772, - "faso": 22773, - "bazaar": 22774, - "warmly": 22775, - "##voking": 22776, - "229": 22777, - "pairing": 22778, - "##lite": 22779, - "##grate": 22780, - "##nets": 22781, - "wien": 22782, - "freaked": 22783, - "ulysses": 22784, - "rebirth": 22785, - "##alia": 22786, - "##rent": 22787, - "mummy": 22788, - "guzman": 22789, - "jimenez": 22790, - "stilled": 22791, - "##nitz": 22792, - "trajectory": 22793, - "tha": 22794, - "woken": 22795, - "archival": 22796, - "professions": 22797, - "##pts": 22798, - "##pta": 22799, - "hilly": 22800, - "shadowy": 22801, - "shrink": 22802, - "##bolt": 22803, - "norwood": 22804, - "glued": 22805, - "migrate": 22806, - "stereotypes": 22807, - "devoid": 22808, - "##pheus": 22809, - "625": 22810, - "evacuate": 22811, - "horrors": 22812, - "infancy": 22813, - "gotham": 22814, - "knowles": 22815, - "optic": 22816, - "downloaded": 22817, - "sachs": 22818, - "kingsley": 22819, - "parramatta": 22820, - "darryl": 22821, - "mor": 22822, - "##onale": 22823, - "shady": 22824, - "commence": 22825, - "confesses": 22826, - "kan": 22827, - "##meter": 22828, - "##placed": 22829, - "marlborough": 22830, - "roundabout": 22831, - "regents": 22832, - "frigates": 22833, - "io": 22834, - "##imating": 22835, - "gothenburg": 22836, - "revoked": 22837, - "carvings": 22838, - "clockwise": 22839, - "convertible": 22840, - "intruder": 22841, - "##sche": 22842, - "banged": 22843, - "##ogo": 22844, - "vicky": 22845, - "bourgeois": 22846, - "##mony": 22847, - "dupont": 22848, - "footing": 22849, - "##gum": 22850, - "pd": 22851, - "##real": 22852, - "buckle": 22853, - "yun": 22854, - "penthouse": 22855, - "sane": 22856, - "720": 22857, - "serviced": 22858, - "stakeholders": 22859, - "neumann": 22860, - "bb": 22861, - "##eers": 22862, - "comb": 22863, - "##gam": 22864, - "catchment": 22865, - "pinning": 22866, - "rallies": 22867, - "typing": 22868, - "##elles": 22869, - "forefront": 22870, - "freiburg": 22871, - "sweetie": 22872, - "giacomo": 22873, - "widowed": 22874, - "goodwill": 22875, - "worshipped": 22876, - "aspirations": 22877, - "midday": 22878, - "##vat": 22879, - "fishery": 22880, - "##trick": 22881, - "bournemouth": 22882, - "turk": 22883, - "243": 22884, - "hearth": 22885, - "ethanol": 22886, - "guadalajara": 22887, - "murmurs": 22888, - "sl": 22889, - "##uge": 22890, - "afforded": 22891, - "scripted": 22892, - "##hta": 22893, - "wah": 22894, - "##jn": 22895, - "coroner": 22896, - "translucent": 22897, - "252": 22898, - "memorials": 22899, - "puck": 22900, - "progresses": 22901, - "clumsy": 22902, - "##race": 22903, - "315": 22904, - "candace": 22905, - "recounted": 22906, - "##27": 22907, - "##slin": 22908, - "##uve": 22909, - 
"filtering": 22910, - "##mac": 22911, - "howl": 22912, - "strata": 22913, - "heron": 22914, - "leveled": 22915, - "##ays": 22916, - "dubious": 22917, - "##oja": 22918, - "##т": 22919, - "##wheel": 22920, - "citations": 22921, - "exhibiting": 22922, - "##laya": 22923, - "##mics": 22924, - "##pods": 22925, - "turkic": 22926, - "##lberg": 22927, - "injunction": 22928, - "##ennial": 22929, - "##mit": 22930, - "antibodies": 22931, - "##44": 22932, - "organise": 22933, - "##rigues": 22934, - "cardiovascular": 22935, - "cushion": 22936, - "inverness": 22937, - "##zquez": 22938, - "dia": 22939, - "cocoa": 22940, - "sibling": 22941, - "##tman": 22942, - "##roid": 22943, - "expanse": 22944, - "feasible": 22945, - "tunisian": 22946, - "algiers": 22947, - "##relli": 22948, - "rus": 22949, - "bloomberg": 22950, - "dso": 22951, - "westphalia": 22952, - "bro": 22953, - "tacoma": 22954, - "281": 22955, - "downloads": 22956, - "##ours": 22957, - "konrad": 22958, - "duran": 22959, - "##hdi": 22960, - "continuum": 22961, - "jett": 22962, - "compares": 22963, - "legislator": 22964, - "secession": 22965, - "##nable": 22966, - "##gues": 22967, - "##zuka": 22968, - "translating": 22969, - "reacher": 22970, - "##gley": 22971, - "##ła": 22972, - "aleppo": 22973, - "##agi": 22974, - "tc": 22975, - "orchards": 22976, - "trapping": 22977, - "linguist": 22978, - "versatile": 22979, - "drumming": 22980, - "postage": 22981, - "calhoun": 22982, - "superiors": 22983, - "##mx": 22984, - "barefoot": 22985, - "leary": 22986, - "##cis": 22987, - "ignacio": 22988, - "alfa": 22989, - "kaplan": 22990, - "##rogen": 22991, - "bratislava": 22992, - "mori": 22993, - "##vot": 22994, - "disturb": 22995, - "haas": 22996, - "313": 22997, - "cartridges": 22998, - "gilmore": 22999, - "radiated": 23000, - "salford": 23001, - "tunic": 23002, - "hades": 23003, - "##ulsive": 23004, - "archeological": 23005, - "delilah": 23006, - "magistrates": 23007, - "auditioned": 23008, - "brewster": 23009, - "charters": 23010, - "empowerment": 23011, - "blogs": 23012, - "cappella": 23013, - "dynasties": 23014, - "iroquois": 23015, - "whipping": 23016, - "##krishna": 23017, - "raceway": 23018, - "truths": 23019, - "myra": 23020, - "weaken": 23021, - "judah": 23022, - "mcgregor": 23023, - "##horse": 23024, - "mic": 23025, - "refueling": 23026, - "37th": 23027, - "burnley": 23028, - "bosses": 23029, - "markus": 23030, - "premio": 23031, - "query": 23032, - "##gga": 23033, - "dunbar": 23034, - "##economic": 23035, - "darkest": 23036, - "lyndon": 23037, - "sealing": 23038, - "commendation": 23039, - "reappeared": 23040, - "##mun": 23041, - "addicted": 23042, - "ezio": 23043, - "slaughtered": 23044, - "satisfactory": 23045, - "shuffle": 23046, - "##eves": 23047, - "##thic": 23048, - "##uj": 23049, - "fortification": 23050, - "warrington": 23051, - "##otto": 23052, - "resurrected": 23053, - "fargo": 23054, - "mane": 23055, - "##utable": 23056, - "##lei": 23057, - "##space": 23058, - "foreword": 23059, - "ox": 23060, - "##aris": 23061, - "##vern": 23062, - "abrams": 23063, - "hua": 23064, - "##mento": 23065, - "sakura": 23066, - "##alo": 23067, - "uv": 23068, - "sentimental": 23069, - "##skaya": 23070, - "midfield": 23071, - "##eses": 23072, - "sturdy": 23073, - "scrolls": 23074, - "macleod": 23075, - "##kyu": 23076, - "entropy": 23077, - "##lance": 23078, - "mitochondrial": 23079, - "cicero": 23080, - "excelled": 23081, - "thinner": 23082, - "convoys": 23083, - "perceive": 23084, - "##oslav": 23085, - "##urable": 23086, - "systematically": 23087, - "grind": 
23088, - "burkina": 23089, - "287": 23090, - "##tagram": 23091, - "ops": 23092, - "##aman": 23093, - "guantanamo": 23094, - "##cloth": 23095, - "##tite": 23096, - "forcefully": 23097, - "wavy": 23098, - "##jou": 23099, - "pointless": 23100, - "##linger": 23101, - "##tze": 23102, - "layton": 23103, - "portico": 23104, - "superficial": 23105, - "clerical": 23106, - "outlaws": 23107, - "##hism": 23108, - "burials": 23109, - "muir": 23110, - "##inn": 23111, - "creditors": 23112, - "hauling": 23113, - "rattle": 23114, - "##leg": 23115, - "calais": 23116, - "monde": 23117, - "archers": 23118, - "reclaimed": 23119, - "dwell": 23120, - "wexford": 23121, - "hellenic": 23122, - "falsely": 23123, - "remorse": 23124, - "##tek": 23125, - "dough": 23126, - "furnishings": 23127, - "##uttered": 23128, - "gabon": 23129, - "neurological": 23130, - "novice": 23131, - "##igraphy": 23132, - "contemplated": 23133, - "pulpit": 23134, - "nightstand": 23135, - "saratoga": 23136, - "##istan": 23137, - "documenting": 23138, - "pulsing": 23139, - "taluk": 23140, - "##firmed": 23141, - "busted": 23142, - "marital": 23143, - "##rien": 23144, - "disagreements": 23145, - "wasps": 23146, - "##yes": 23147, - "hodge": 23148, - "mcdonnell": 23149, - "mimic": 23150, - "fran": 23151, - "pendant": 23152, - "dhabi": 23153, - "musa": 23154, - "##nington": 23155, - "congratulations": 23156, - "argent": 23157, - "darrell": 23158, - "concussion": 23159, - "losers": 23160, - "regrets": 23161, - "thessaloniki": 23162, - "reversal": 23163, - "donaldson": 23164, - "hardwood": 23165, - "thence": 23166, - "achilles": 23167, - "ritter": 23168, - "##eran": 23169, - "demonic": 23170, - "jurgen": 23171, - "prophets": 23172, - "goethe": 23173, - "eki": 23174, - "classmate": 23175, - "buff": 23176, - "##cking": 23177, - "yank": 23178, - "irrational": 23179, - "##inging": 23180, - "perished": 23181, - "seductive": 23182, - "qur": 23183, - "sourced": 23184, - "##crat": 23185, - "##typic": 23186, - "mustard": 23187, - "ravine": 23188, - "barre": 23189, - "horizontally": 23190, - "characterization": 23191, - "phylogenetic": 23192, - "boise": 23193, - "##dit": 23194, - "##runner": 23195, - "##tower": 23196, - "brutally": 23197, - "intercourse": 23198, - "seduce": 23199, - "##bbing": 23200, - "fay": 23201, - "ferris": 23202, - "ogden": 23203, - "amar": 23204, - "nik": 23205, - "unarmed": 23206, - "##inator": 23207, - "evaluating": 23208, - "kyrgyzstan": 23209, - "sweetness": 23210, - "##lford": 23211, - "##oki": 23212, - "mccormick": 23213, - "meiji": 23214, - "notoriety": 23215, - "stimulate": 23216, - "disrupt": 23217, - "figuring": 23218, - "instructional": 23219, - "mcgrath": 23220, - "##zoo": 23221, - "groundbreaking": 23222, - "##lto": 23223, - "flinch": 23224, - "khorasan": 23225, - "agrarian": 23226, - "bengals": 23227, - "mixer": 23228, - "radiating": 23229, - "##sov": 23230, - "ingram": 23231, - "pitchers": 23232, - "nad": 23233, - "tariff": 23234, - "##cript": 23235, - "tata": 23236, - "##codes": 23237, - "##emi": 23238, - "##ungen": 23239, - "appellate": 23240, - "lehigh": 23241, - "##bled": 23242, - "##giri": 23243, - "brawl": 23244, - "duct": 23245, - "texans": 23246, - "##ciation": 23247, - "##ropolis": 23248, - "skipper": 23249, - "speculative": 23250, - "vomit": 23251, - "doctrines": 23252, - "stresses": 23253, - "253": 23254, - "davy": 23255, - "graders": 23256, - "whitehead": 23257, - "jozef": 23258, - "timely": 23259, - "cumulative": 23260, - "haryana": 23261, - "paints": 23262, - "appropriately": 23263, - "boon": 23264, - 
"cactus": 23265, - "##ales": 23266, - "##pid": 23267, - "dow": 23268, - "legions": 23269, - "##pit": 23270, - "perceptions": 23271, - "1730": 23272, - "picturesque": 23273, - "##yse": 23274, - "periphery": 23275, - "rune": 23276, - "wr": 23277, - "##aha": 23278, - "celtics": 23279, - "sentencing": 23280, - "whoa": 23281, - "##erin": 23282, - "confirms": 23283, - "variance": 23284, - "425": 23285, - "moines": 23286, - "mathews": 23287, - "spade": 23288, - "rave": 23289, - "m1": 23290, - "fronted": 23291, - "fx": 23292, - "blending": 23293, - "alleging": 23294, - "reared": 23295, - "##gl": 23296, - "237": 23297, - "##paper": 23298, - "grassroots": 23299, - "eroded": 23300, - "##free": 23301, - "##physical": 23302, - "directs": 23303, - "ordeal": 23304, - "##sław": 23305, - "accelerate": 23306, - "hacker": 23307, - "rooftop": 23308, - "##inia": 23309, - "lev": 23310, - "buys": 23311, - "cebu": 23312, - "devote": 23313, - "##lce": 23314, - "specialising": 23315, - "##ulsion": 23316, - "choreographed": 23317, - "repetition": 23318, - "warehouses": 23319, - "##ryl": 23320, - "paisley": 23321, - "tuscany": 23322, - "analogy": 23323, - "sorcerer": 23324, - "hash": 23325, - "huts": 23326, - "shards": 23327, - "descends": 23328, - "exclude": 23329, - "nix": 23330, - "chaplin": 23331, - "gaga": 23332, - "ito": 23333, - "vane": 23334, - "##drich": 23335, - "causeway": 23336, - "misconduct": 23337, - "limo": 23338, - "orchestrated": 23339, - "glands": 23340, - "jana": 23341, - "##kot": 23342, - "u2": 23343, - "##mple": 23344, - "##sons": 23345, - "branching": 23346, - "contrasts": 23347, - "scoop": 23348, - "longed": 23349, - "##virus": 23350, - "chattanooga": 23351, - "##75": 23352, - "syrup": 23353, - "cornerstone": 23354, - "##tized": 23355, - "##mind": 23356, - "##iaceae": 23357, - "careless": 23358, - "precedence": 23359, - "frescoes": 23360, - "##uet": 23361, - "chilled": 23362, - "consult": 23363, - "modelled": 23364, - "snatch": 23365, - "peat": 23366, - "##thermal": 23367, - "caucasian": 23368, - "humane": 23369, - "relaxation": 23370, - "spins": 23371, - "temperance": 23372, - "##lbert": 23373, - "occupations": 23374, - "lambda": 23375, - "hybrids": 23376, - "moons": 23377, - "mp3": 23378, - "##oese": 23379, - "247": 23380, - "rolf": 23381, - "societal": 23382, - "yerevan": 23383, - "ness": 23384, - "##ssler": 23385, - "befriended": 23386, - "mechanized": 23387, - "nominate": 23388, - "trough": 23389, - "boasted": 23390, - "cues": 23391, - "seater": 23392, - "##hom": 23393, - "bends": 23394, - "##tangle": 23395, - "conductors": 23396, - "emptiness": 23397, - "##lmer": 23398, - "eurasian": 23399, - "adriatic": 23400, - "tian": 23401, - "##cie": 23402, - "anxiously": 23403, - "lark": 23404, - "propellers": 23405, - "chichester": 23406, - "jock": 23407, - "ev": 23408, - "2a": 23409, - "##holding": 23410, - "credible": 23411, - "recounts": 23412, - "tori": 23413, - "loyalist": 23414, - "abduction": 23415, - "##hoot": 23416, - "##redo": 23417, - "nepali": 23418, - "##mite": 23419, - "ventral": 23420, - "tempting": 23421, - "##ango": 23422, - "##crats": 23423, - "steered": 23424, - "##wice": 23425, - "javelin": 23426, - "dipping": 23427, - "laborers": 23428, - "prentice": 23429, - "looming": 23430, - "titanium": 23431, - "##ː": 23432, - "badges": 23433, - "emir": 23434, - "tensor": 23435, - "##ntation": 23436, - "egyptians": 23437, - "rash": 23438, - "denies": 23439, - "hawthorne": 23440, - "lombard": 23441, - "showers": 23442, - "wehrmacht": 23443, - "dietary": 23444, - "trojan": 23445, - 
"##reus": 23446, - "welles": 23447, - "executing": 23448, - "horseshoe": 23449, - "lifeboat": 23450, - "##lak": 23451, - "elsa": 23452, - "infirmary": 23453, - "nearing": 23454, - "roberta": 23455, - "boyer": 23456, - "mutter": 23457, - "trillion": 23458, - "joanne": 23459, - "##fine": 23460, - "##oked": 23461, - "sinks": 23462, - "vortex": 23463, - "uruguayan": 23464, - "clasp": 23465, - "sirius": 23466, - "##block": 23467, - "accelerator": 23468, - "prohibit": 23469, - "sunken": 23470, - "byu": 23471, - "chronological": 23472, - "diplomats": 23473, - "ochreous": 23474, - "510": 23475, - "symmetrical": 23476, - "1644": 23477, - "maia": 23478, - "##tology": 23479, - "salts": 23480, - "reigns": 23481, - "atrocities": 23482, - "##ия": 23483, - "hess": 23484, - "bared": 23485, - "issn": 23486, - "##vyn": 23487, - "cater": 23488, - "saturated": 23489, - "##cycle": 23490, - "##isse": 23491, - "sable": 23492, - "voyager": 23493, - "dyer": 23494, - "yusuf": 23495, - "##inge": 23496, - "fountains": 23497, - "wolff": 23498, - "##39": 23499, - "##nni": 23500, - "engraving": 23501, - "rollins": 23502, - "atheist": 23503, - "ominous": 23504, - "##ault": 23505, - "herr": 23506, - "chariot": 23507, - "martina": 23508, - "strung": 23509, - "##fell": 23510, - "##farlane": 23511, - "horrific": 23512, - "sahib": 23513, - "gazes": 23514, - "saetan": 23515, - "erased": 23516, - "ptolemy": 23517, - "##olic": 23518, - "flushing": 23519, - "lauderdale": 23520, - "analytic": 23521, - "##ices": 23522, - "530": 23523, - "navarro": 23524, - "beak": 23525, - "gorilla": 23526, - "herrera": 23527, - "broom": 23528, - "guadalupe": 23529, - "raiding": 23530, - "sykes": 23531, - "311": 23532, - "bsc": 23533, - "deliveries": 23534, - "1720": 23535, - "invasions": 23536, - "carmichael": 23537, - "tajikistan": 23538, - "thematic": 23539, - "ecumenical": 23540, - "sentiments": 23541, - "onstage": 23542, - "##rians": 23543, - "##brand": 23544, - "##sume": 23545, - "catastrophic": 23546, - "flanks": 23547, - "molten": 23548, - "##arns": 23549, - "waller": 23550, - "aimee": 23551, - "terminating": 23552, - "##icing": 23553, - "alternately": 23554, - "##oche": 23555, - "nehru": 23556, - "printers": 23557, - "outraged": 23558, - "##eving": 23559, - "empires": 23560, - "template": 23561, - "banners": 23562, - "repetitive": 23563, - "za": 23564, - "##oise": 23565, - "vegetarian": 23566, - "##tell": 23567, - "guiana": 23568, - "opt": 23569, - "cavendish": 23570, - "lucknow": 23571, - "synthesized": 23572, - "##hani": 23573, - "##mada": 23574, - "finalized": 23575, - "##ctable": 23576, - "fictitious": 23577, - "mayoral": 23578, - "unreliable": 23579, - "##enham": 23580, - "embracing": 23581, - "peppers": 23582, - "rbis": 23583, - "##chio": 23584, - "##neo": 23585, - "inhibition": 23586, - "slashed": 23587, - "togo": 23588, - "orderly": 23589, - "embroidered": 23590, - "safari": 23591, - "salty": 23592, - "236": 23593, - "barron": 23594, - "benito": 23595, - "totaled": 23596, - "##dak": 23597, - "pubs": 23598, - "simulated": 23599, - "caden": 23600, - "devin": 23601, - "tolkien": 23602, - "momma": 23603, - "welding": 23604, - "sesame": 23605, - "##ept": 23606, - "gottingen": 23607, - "hardness": 23608, - "630": 23609, - "shaman": 23610, - "temeraire": 23611, - "620": 23612, - "adequately": 23613, - "pediatric": 23614, - "##kit": 23615, - "ck": 23616, - "assertion": 23617, - "radicals": 23618, - "composure": 23619, - "cadence": 23620, - "seafood": 23621, - "beaufort": 23622, - "lazarus": 23623, - "mani": 23624, - "warily": 23625, - 
"cunning": 23626, - "kurdistan": 23627, - "249": 23628, - "cantata": 23629, - "##kir": 23630, - "ares": 23631, - "##41": 23632, - "##clusive": 23633, - "nape": 23634, - "townland": 23635, - "geared": 23636, - "insulted": 23637, - "flutter": 23638, - "boating": 23639, - "violate": 23640, - "draper": 23641, - "dumping": 23642, - "malmo": 23643, - "##hh": 23644, - "##romatic": 23645, - "firearm": 23646, - "alta": 23647, - "bono": 23648, - "obscured": 23649, - "##clave": 23650, - "exceeds": 23651, - "panorama": 23652, - "unbelievable": 23653, - "##train": 23654, - "preschool": 23655, - "##essed": 23656, - "disconnected": 23657, - "installing": 23658, - "rescuing": 23659, - "secretaries": 23660, - "accessibility": 23661, - "##castle": 23662, - "##drive": 23663, - "##ifice": 23664, - "##film": 23665, - "bouts": 23666, - "slug": 23667, - "waterway": 23668, - "mindanao": 23669, - "##buro": 23670, - "##ratic": 23671, - "halves": 23672, - "##ل": 23673, - "calming": 23674, - "liter": 23675, - "maternity": 23676, - "adorable": 23677, - "bragg": 23678, - "electrification": 23679, - "mcc": 23680, - "##dote": 23681, - "roxy": 23682, - "schizophrenia": 23683, - "##body": 23684, - "munoz": 23685, - "kaye": 23686, - "whaling": 23687, - "239": 23688, - "mil": 23689, - "tingling": 23690, - "tolerant": 23691, - "##ago": 23692, - "unconventional": 23693, - "volcanoes": 23694, - "##finder": 23695, - "deportivo": 23696, - "##llie": 23697, - "robson": 23698, - "kaufman": 23699, - "neuroscience": 23700, - "wai": 23701, - "deportation": 23702, - "masovian": 23703, - "scraping": 23704, - "converse": 23705, - "##bh": 23706, - "hacking": 23707, - "bulge": 23708, - "##oun": 23709, - "administratively": 23710, - "yao": 23711, - "580": 23712, - "amp": 23713, - "mammoth": 23714, - "booster": 23715, - "claremont": 23716, - "hooper": 23717, - "nomenclature": 23718, - "pursuits": 23719, - "mclaughlin": 23720, - "melinda": 23721, - "##sul": 23722, - "catfish": 23723, - "barclay": 23724, - "substrates": 23725, - "taxa": 23726, - "zee": 23727, - "originals": 23728, - "kimberly": 23729, - "packets": 23730, - "padma": 23731, - "##ality": 23732, - "borrowing": 23733, - "ostensibly": 23734, - "solvent": 23735, - "##bri": 23736, - "##genesis": 23737, - "##mist": 23738, - "lukas": 23739, - "shreveport": 23740, - "veracruz": 23741, - "##ь": 23742, - "##lou": 23743, - "##wives": 23744, - "cheney": 23745, - "tt": 23746, - "anatolia": 23747, - "hobbs": 23748, - "##zyn": 23749, - "cyclic": 23750, - "radiant": 23751, - "alistair": 23752, - "greenish": 23753, - "siena": 23754, - "dat": 23755, - "independents": 23756, - "##bation": 23757, - "conform": 23758, - "pieter": 23759, - "hyper": 23760, - "applicant": 23761, - "bradshaw": 23762, - "spores": 23763, - "telangana": 23764, - "vinci": 23765, - "inexpensive": 23766, - "nuclei": 23767, - "322": 23768, - "jang": 23769, - "nme": 23770, - "soho": 23771, - "spd": 23772, - "##ign": 23773, - "cradled": 23774, - "receptionist": 23775, - "pow": 23776, - "##43": 23777, - "##rika": 23778, - "fascism": 23779, - "##ifer": 23780, - "experimenting": 23781, - "##ading": 23782, - "##iec": 23783, - "##region": 23784, - "345": 23785, - "jocelyn": 23786, - "maris": 23787, - "stair": 23788, - "nocturnal": 23789, - "toro": 23790, - "constabulary": 23791, - "elgin": 23792, - "##kker": 23793, - "msc": 23794, - "##giving": 23795, - "##schen": 23796, - "##rase": 23797, - "doherty": 23798, - "doping": 23799, - "sarcastically": 23800, - "batter": 23801, - "maneuvers": 23802, - "##cano": 23803, - "##apple": 23804, - 
"##gai": 23805, - "##git": 23806, - "intrinsic": 23807, - "##nst": 23808, - "##stor": 23809, - "1753": 23810, - "showtime": 23811, - "cafes": 23812, - "gasps": 23813, - "lviv": 23814, - "ushered": 23815, - "##thed": 23816, - "fours": 23817, - "restart": 23818, - "astonishment": 23819, - "transmitting": 23820, - "flyer": 23821, - "shrugs": 23822, - "##sau": 23823, - "intriguing": 23824, - "cones": 23825, - "dictated": 23826, - "mushrooms": 23827, - "medial": 23828, - "##kovsky": 23829, - "##elman": 23830, - "escorting": 23831, - "gaped": 23832, - "##26": 23833, - "godfather": 23834, - "##door": 23835, - "##sell": 23836, - "djs": 23837, - "recaptured": 23838, - "timetable": 23839, - "vila": 23840, - "1710": 23841, - "3a": 23842, - "aerodrome": 23843, - "mortals": 23844, - "scientology": 23845, - "##orne": 23846, - "angelina": 23847, - "mag": 23848, - "convection": 23849, - "unpaid": 23850, - "insertion": 23851, - "intermittent": 23852, - "lego": 23853, - "##nated": 23854, - "endeavor": 23855, - "kota": 23856, - "pereira": 23857, - "##lz": 23858, - "304": 23859, - "bwv": 23860, - "glamorgan": 23861, - "insults": 23862, - "agatha": 23863, - "fey": 23864, - "##cend": 23865, - "fleetwood": 23866, - "mahogany": 23867, - "protruding": 23868, - "steamship": 23869, - "zeta": 23870, - "##arty": 23871, - "mcguire": 23872, - "suspense": 23873, - "##sphere": 23874, - "advising": 23875, - "urges": 23876, - "##wala": 23877, - "hurriedly": 23878, - "meteor": 23879, - "gilded": 23880, - "inline": 23881, - "arroyo": 23882, - "stalker": 23883, - "##oge": 23884, - "excitedly": 23885, - "revered": 23886, - "##cure": 23887, - "earle": 23888, - "introductory": 23889, - "##break": 23890, - "##ilde": 23891, - "mutants": 23892, - "puff": 23893, - "pulses": 23894, - "reinforcement": 23895, - "##haling": 23896, - "curses": 23897, - "lizards": 23898, - "stalk": 23899, - "correlated": 23900, - "##fixed": 23901, - "fallout": 23902, - "macquarie": 23903, - "##unas": 23904, - "bearded": 23905, - "denton": 23906, - "heaving": 23907, - "802": 23908, - "##ocation": 23909, - "winery": 23910, - "assign": 23911, - "dortmund": 23912, - "##lkirk": 23913, - "everest": 23914, - "invariant": 23915, - "charismatic": 23916, - "susie": 23917, - "##elling": 23918, - "bled": 23919, - "lesley": 23920, - "telegram": 23921, - "sumner": 23922, - "bk": 23923, - "##ogen": 23924, - "##к": 23925, - "wilcox": 23926, - "needy": 23927, - "colbert": 23928, - "duval": 23929, - "##iferous": 23930, - "##mbled": 23931, - "allotted": 23932, - "attends": 23933, - "imperative": 23934, - "##hita": 23935, - "replacements": 23936, - "hawker": 23937, - "##inda": 23938, - "insurgency": 23939, - "##zee": 23940, - "##eke": 23941, - "casts": 23942, - "##yla": 23943, - "680": 23944, - "ives": 23945, - "transitioned": 23946, - "##pack": 23947, - "##powering": 23948, - "authoritative": 23949, - "baylor": 23950, - "flex": 23951, - "cringed": 23952, - "plaintiffs": 23953, - "woodrow": 23954, - "##skie": 23955, - "drastic": 23956, - "ape": 23957, - "aroma": 23958, - "unfolded": 23959, - "commotion": 23960, - "nt": 23961, - "preoccupied": 23962, - "theta": 23963, - "routines": 23964, - "lasers": 23965, - "privatization": 23966, - "wand": 23967, - "domino": 23968, - "ek": 23969, - "clenching": 23970, - "nsa": 23971, - "strategically": 23972, - "showered": 23973, - "bile": 23974, - "handkerchief": 23975, - "pere": 23976, - "storing": 23977, - "christophe": 23978, - "insulting": 23979, - "316": 23980, - "nakamura": 23981, - "romani": 23982, - "asiatic": 23983, - "magdalena": 
23984, - "palma": 23985, - "cruises": 23986, - "stripping": 23987, - "405": 23988, - "konstantin": 23989, - "soaring": 23990, - "##berman": 23991, - "colloquially": 23992, - "forerunner": 23993, - "havilland": 23994, - "incarcerated": 23995, - "parasites": 23996, - "sincerity": 23997, - "##utus": 23998, - "disks": 23999, - "plank": 24000, - "saigon": 24001, - "##ining": 24002, - "corbin": 24003, - "homo": 24004, - "ornaments": 24005, - "powerhouse": 24006, - "##tlement": 24007, - "chong": 24008, - "fastened": 24009, - "feasibility": 24010, - "idf": 24011, - "morphological": 24012, - "usable": 24013, - "##nish": 24014, - "##zuki": 24015, - "aqueduct": 24016, - "jaguars": 24017, - "keepers": 24018, - "##flies": 24019, - "aleksandr": 24020, - "faust": 24021, - "assigns": 24022, - "ewing": 24023, - "bacterium": 24024, - "hurled": 24025, - "tricky": 24026, - "hungarians": 24027, - "integers": 24028, - "wallis": 24029, - "321": 24030, - "yamaha": 24031, - "##isha": 24032, - "hushed": 24033, - "oblivion": 24034, - "aviator": 24035, - "evangelist": 24036, - "friars": 24037, - "##eller": 24038, - "monograph": 24039, - "ode": 24040, - "##nary": 24041, - "airplanes": 24042, - "labourers": 24043, - "charms": 24044, - "##nee": 24045, - "1661": 24046, - "hagen": 24047, - "tnt": 24048, - "rudder": 24049, - "fiesta": 24050, - "transcript": 24051, - "dorothea": 24052, - "ska": 24053, - "inhibitor": 24054, - "maccabi": 24055, - "retorted": 24056, - "raining": 24057, - "encompassed": 24058, - "clauses": 24059, - "menacing": 24060, - "1642": 24061, - "lineman": 24062, - "##gist": 24063, - "vamps": 24064, - "##ape": 24065, - "##dick": 24066, - "gloom": 24067, - "##rera": 24068, - "dealings": 24069, - "easing": 24070, - "seekers": 24071, - "##nut": 24072, - "##pment": 24073, - "helens": 24074, - "unmanned": 24075, - "##anu": 24076, - "##isson": 24077, - "basics": 24078, - "##amy": 24079, - "##ckman": 24080, - "adjustments": 24081, - "1688": 24082, - "brutality": 24083, - "horne": 24084, - "##zell": 24085, - "sui": 24086, - "##55": 24087, - "##mable": 24088, - "aggregator": 24089, - "##thal": 24090, - "rhino": 24091, - "##drick": 24092, - "##vira": 24093, - "counters": 24094, - "zoom": 24095, - "##01": 24096, - "##rting": 24097, - "mn": 24098, - "montenegrin": 24099, - "packard": 24100, - "##unciation": 24101, - "##♭": 24102, - "##kki": 24103, - "reclaim": 24104, - "scholastic": 24105, - "thugs": 24106, - "pulsed": 24107, - "##icia": 24108, - "syriac": 24109, - "quan": 24110, - "saddam": 24111, - "banda": 24112, - "kobe": 24113, - "blaming": 24114, - "buddies": 24115, - "dissent": 24116, - "##lusion": 24117, - "##usia": 24118, - "corbett": 24119, - "jaya": 24120, - "delle": 24121, - "erratic": 24122, - "lexie": 24123, - "##hesis": 24124, - "435": 24125, - "amiga": 24126, - "hermes": 24127, - "##pressing": 24128, - "##leen": 24129, - "chapels": 24130, - "gospels": 24131, - "jamal": 24132, - "##uating": 24133, - "compute": 24134, - "revolving": 24135, - "warp": 24136, - "##sso": 24137, - "##thes": 24138, - "armory": 24139, - "##eras": 24140, - "##gol": 24141, - "antrim": 24142, - "loki": 24143, - "##kow": 24144, - "##asian": 24145, - "##good": 24146, - "##zano": 24147, - "braid": 24148, - "handwriting": 24149, - "subdistrict": 24150, - "funky": 24151, - "pantheon": 24152, - "##iculate": 24153, - "concurrency": 24154, - "estimation": 24155, - "improper": 24156, - "juliana": 24157, - "##his": 24158, - "newcomers": 24159, - "johnstone": 24160, - "staten": 24161, - "communicated": 24162, - "##oco": 24163, - "##alle": 
24164, - "sausage": 24165, - "stormy": 24166, - "##stered": 24167, - "##tters": 24168, - "superfamily": 24169, - "##grade": 24170, - "acidic": 24171, - "collateral": 24172, - "tabloid": 24173, - "##oped": 24174, - "##rza": 24175, - "bladder": 24176, - "austen": 24177, - "##ellant": 24178, - "mcgraw": 24179, - "##hay": 24180, - "hannibal": 24181, - "mein": 24182, - "aquino": 24183, - "lucifer": 24184, - "wo": 24185, - "badger": 24186, - "boar": 24187, - "cher": 24188, - "christensen": 24189, - "greenberg": 24190, - "interruption": 24191, - "##kken": 24192, - "jem": 24193, - "244": 24194, - "mocked": 24195, - "bottoms": 24196, - "cambridgeshire": 24197, - "##lide": 24198, - "sprawling": 24199, - "##bbly": 24200, - "eastwood": 24201, - "ghent": 24202, - "synth": 24203, - "##buck": 24204, - "advisers": 24205, - "##bah": 24206, - "nominally": 24207, - "hapoel": 24208, - "qu": 24209, - "daggers": 24210, - "estranged": 24211, - "fabricated": 24212, - "towels": 24213, - "vinnie": 24214, - "wcw": 24215, - "misunderstanding": 24216, - "anglia": 24217, - "nothin": 24218, - "unmistakable": 24219, - "##dust": 24220, - "##lova": 24221, - "chilly": 24222, - "marquette": 24223, - "truss": 24224, - "##edge": 24225, - "##erine": 24226, - "reece": 24227, - "##lty": 24228, - "##chemist": 24229, - "##connected": 24230, - "272": 24231, - "308": 24232, - "41st": 24233, - "bash": 24234, - "raion": 24235, - "waterfalls": 24236, - "##ump": 24237, - "##main": 24238, - "labyrinth": 24239, - "queue": 24240, - "theorist": 24241, - "##istle": 24242, - "bharatiya": 24243, - "flexed": 24244, - "soundtracks": 24245, - "rooney": 24246, - "leftist": 24247, - "patrolling": 24248, - "wharton": 24249, - "plainly": 24250, - "alleviate": 24251, - "eastman": 24252, - "schuster": 24253, - "topographic": 24254, - "engages": 24255, - "immensely": 24256, - "unbearable": 24257, - "fairchild": 24258, - "1620": 24259, - "dona": 24260, - "lurking": 24261, - "parisian": 24262, - "oliveira": 24263, - "ia": 24264, - "indictment": 24265, - "hahn": 24266, - "bangladeshi": 24267, - "##aster": 24268, - "vivo": 24269, - "##uming": 24270, - "##ential": 24271, - "antonia": 24272, - "expects": 24273, - "indoors": 24274, - "kildare": 24275, - "harlan": 24276, - "##logue": 24277, - "##ogenic": 24278, - "##sities": 24279, - "forgiven": 24280, - "##wat": 24281, - "childish": 24282, - "tavi": 24283, - "##mide": 24284, - "##orra": 24285, - "plausible": 24286, - "grimm": 24287, - "successively": 24288, - "scooted": 24289, - "##bola": 24290, - "##dget": 24291, - "##rith": 24292, - "spartans": 24293, - "emery": 24294, - "flatly": 24295, - "azure": 24296, - "epilogue": 24297, - "##wark": 24298, - "flourish": 24299, - "##iny": 24300, - "##tracted": 24301, - "##overs": 24302, - "##oshi": 24303, - "bestseller": 24304, - "distressed": 24305, - "receipt": 24306, - "spitting": 24307, - "hermit": 24308, - "topological": 24309, - "##cot": 24310, - "drilled": 24311, - "subunit": 24312, - "francs": 24313, - "##layer": 24314, - "eel": 24315, - "##fk": 24316, - "##itas": 24317, - "octopus": 24318, - "footprint": 24319, - "petitions": 24320, - "ufo": 24321, - "##say": 24322, - "##foil": 24323, - "interfering": 24324, - "leaking": 24325, - "palo": 24326, - "##metry": 24327, - "thistle": 24328, - "valiant": 24329, - "##pic": 24330, - "narayan": 24331, - "mcpherson": 24332, - "##fast": 24333, - "gonzales": 24334, - "##ym": 24335, - "##enne": 24336, - "dustin": 24337, - "novgorod": 24338, - "solos": 24339, - "##zman": 24340, - "doin": 24341, - "##raph": 24342, - "##patient": 
24343, - "##meyer": 24344, - "soluble": 24345, - "ashland": 24346, - "cuffs": 24347, - "carole": 24348, - "pendleton": 24349, - "whistling": 24350, - "vassal": 24351, - "##river": 24352, - "deviation": 24353, - "revisited": 24354, - "constituents": 24355, - "rallied": 24356, - "rotate": 24357, - "loomed": 24358, - "##eil": 24359, - "##nting": 24360, - "amateurs": 24361, - "augsburg": 24362, - "auschwitz": 24363, - "crowns": 24364, - "skeletons": 24365, - "##cona": 24366, - "bonnet": 24367, - "257": 24368, - "dummy": 24369, - "globalization": 24370, - "simeon": 24371, - "sleeper": 24372, - "mandal": 24373, - "differentiated": 24374, - "##crow": 24375, - "##mare": 24376, - "milne": 24377, - "bundled": 24378, - "exasperated": 24379, - "talmud": 24380, - "owes": 24381, - "segregated": 24382, - "##feng": 24383, - "##uary": 24384, - "dentist": 24385, - "piracy": 24386, - "props": 24387, - "##rang": 24388, - "devlin": 24389, - "##torium": 24390, - "malicious": 24391, - "paws": 24392, - "##laid": 24393, - "dependency": 24394, - "##ergy": 24395, - "##fers": 24396, - "##enna": 24397, - "258": 24398, - "pistons": 24399, - "rourke": 24400, - "jed": 24401, - "grammatical": 24402, - "tres": 24403, - "maha": 24404, - "wig": 24405, - "512": 24406, - "ghostly": 24407, - "jayne": 24408, - "##achal": 24409, - "##creen": 24410, - "##ilis": 24411, - "##lins": 24412, - "##rence": 24413, - "designate": 24414, - "##with": 24415, - "arrogance": 24416, - "cambodian": 24417, - "clones": 24418, - "showdown": 24419, - "throttle": 24420, - "twain": 24421, - "##ception": 24422, - "lobes": 24423, - "metz": 24424, - "nagoya": 24425, - "335": 24426, - "braking": 24427, - "##furt": 24428, - "385": 24429, - "roaming": 24430, - "##minster": 24431, - "amin": 24432, - "crippled": 24433, - "##37": 24434, - "##llary": 24435, - "indifferent": 24436, - "hoffmann": 24437, - "idols": 24438, - "intimidating": 24439, - "1751": 24440, - "261": 24441, - "influenza": 24442, - "memo": 24443, - "onions": 24444, - "1748": 24445, - "bandage": 24446, - "consciously": 24447, - "##landa": 24448, - "##rage": 24449, - "clandestine": 24450, - "observes": 24451, - "swiped": 24452, - "tangle": 24453, - "##ener": 24454, - "##jected": 24455, - "##trum": 24456, - "##bill": 24457, - "##lta": 24458, - "hugs": 24459, - "congresses": 24460, - "josiah": 24461, - "spirited": 24462, - "##dek": 24463, - "humanist": 24464, - "managerial": 24465, - "filmmaking": 24466, - "inmate": 24467, - "rhymes": 24468, - "debuting": 24469, - "grimsby": 24470, - "ur": 24471, - "##laze": 24472, - "duplicate": 24473, - "vigor": 24474, - "##tf": 24475, - "republished": 24476, - "bolshevik": 24477, - "refurbishment": 24478, - "antibiotics": 24479, - "martini": 24480, - "methane": 24481, - "newscasts": 24482, - "royale": 24483, - "horizons": 24484, - "levant": 24485, - "iain": 24486, - "visas": 24487, - "##ischen": 24488, - "paler": 24489, - "##around": 24490, - "manifestation": 24491, - "snuck": 24492, - "alf": 24493, - "chop": 24494, - "futile": 24495, - "pedestal": 24496, - "rehab": 24497, - "##kat": 24498, - "bmg": 24499, - "kerman": 24500, - "res": 24501, - "fairbanks": 24502, - "jarrett": 24503, - "abstraction": 24504, - "saharan": 24505, - "##zek": 24506, - "1746": 24507, - "procedural": 24508, - "clearer": 24509, - "kincaid": 24510, - "sash": 24511, - "luciano": 24512, - "##ffey": 24513, - "crunch": 24514, - "helmut": 24515, - "##vara": 24516, - "revolutionaries": 24517, - "##tute": 24518, - "creamy": 24519, - "leach": 24520, - "##mmon": 24521, - "1747": 24522, - 
"permitting": 24523, - "nes": 24524, - "plight": 24525, - "wendell": 24526, - "##lese": 24527, - "contra": 24528, - "ts": 24529, - "clancy": 24530, - "ipa": 24531, - "mach": 24532, - "staples": 24533, - "autopsy": 24534, - "disturbances": 24535, - "nueva": 24536, - "karin": 24537, - "pontiac": 24538, - "##uding": 24539, - "proxy": 24540, - "venerable": 24541, - "haunt": 24542, - "leto": 24543, - "bergman": 24544, - "expands": 24545, - "##helm": 24546, - "wal": 24547, - "##pipe": 24548, - "canning": 24549, - "celine": 24550, - "cords": 24551, - "obesity": 24552, - "##enary": 24553, - "intrusion": 24554, - "planner": 24555, - "##phate": 24556, - "reasoned": 24557, - "sequencing": 24558, - "307": 24559, - "harrow": 24560, - "##chon": 24561, - "##dora": 24562, - "marred": 24563, - "mcintyre": 24564, - "repay": 24565, - "tarzan": 24566, - "darting": 24567, - "248": 24568, - "harrisburg": 24569, - "margarita": 24570, - "repulsed": 24571, - "##hur": 24572, - "##lding": 24573, - "belinda": 24574, - "hamburger": 24575, - "novo": 24576, - "compliant": 24577, - "runways": 24578, - "bingham": 24579, - "registrar": 24580, - "skyscraper": 24581, - "ic": 24582, - "cuthbert": 24583, - "improvisation": 24584, - "livelihood": 24585, - "##corp": 24586, - "##elial": 24587, - "admiring": 24588, - "##dened": 24589, - "sporadic": 24590, - "believer": 24591, - "casablanca": 24592, - "popcorn": 24593, - "##29": 24594, - "asha": 24595, - "shovel": 24596, - "##bek": 24597, - "##dice": 24598, - "coiled": 24599, - "tangible": 24600, - "##dez": 24601, - "casper": 24602, - "elsie": 24603, - "resin": 24604, - "tenderness": 24605, - "rectory": 24606, - "##ivision": 24607, - "avail": 24608, - "sonar": 24609, - "##mori": 24610, - "boutique": 24611, - "##dier": 24612, - "guerre": 24613, - "bathed": 24614, - "upbringing": 24615, - "vaulted": 24616, - "sandals": 24617, - "blessings": 24618, - "##naut": 24619, - "##utnant": 24620, - "1680": 24621, - "306": 24622, - "foxes": 24623, - "pia": 24624, - "corrosion": 24625, - "hesitantly": 24626, - "confederates": 24627, - "crystalline": 24628, - "footprints": 24629, - "shapiro": 24630, - "tirana": 24631, - "valentin": 24632, - "drones": 24633, - "45th": 24634, - "microscope": 24635, - "shipments": 24636, - "texted": 24637, - "inquisition": 24638, - "wry": 24639, - "guernsey": 24640, - "unauthorized": 24641, - "resigning": 24642, - "760": 24643, - "ripple": 24644, - "schubert": 24645, - "stu": 24646, - "reassure": 24647, - "felony": 24648, - "##ardo": 24649, - "brittle": 24650, - "koreans": 24651, - "##havan": 24652, - "##ives": 24653, - "dun": 24654, - "implicit": 24655, - "tyres": 24656, - "##aldi": 24657, - "##lth": 24658, - "magnolia": 24659, - "##ehan": 24660, - "##puri": 24661, - "##poulos": 24662, - "aggressively": 24663, - "fei": 24664, - "gr": 24665, - "familiarity": 24666, - "##poo": 24667, - "indicative": 24668, - "##trust": 24669, - "fundamentally": 24670, - "jimmie": 24671, - "overrun": 24672, - "395": 24673, - "anchors": 24674, - "moans": 24675, - "##opus": 24676, - "britannia": 24677, - "armagh": 24678, - "##ggle": 24679, - "purposely": 24680, - "seizing": 24681, - "##vao": 24682, - "bewildered": 24683, - "mundane": 24684, - "avoidance": 24685, - "cosmopolitan": 24686, - "geometridae": 24687, - "quartermaster": 24688, - "caf": 24689, - "415": 24690, - "chatter": 24691, - "engulfed": 24692, - "gleam": 24693, - "purge": 24694, - "##icate": 24695, - "juliette": 24696, - "jurisprudence": 24697, - "guerra": 24698, - "revisions": 24699, - "##bn": 24700, - "casimir": 24701, - 
"brew": 24702, - "##jm": 24703, - "1749": 24704, - "clapton": 24705, - "cloudy": 24706, - "conde": 24707, - "hermitage": 24708, - "278": 24709, - "simulations": 24710, - "torches": 24711, - "vincenzo": 24712, - "matteo": 24713, - "##rill": 24714, - "hidalgo": 24715, - "booming": 24716, - "westbound": 24717, - "accomplishment": 24718, - "tentacles": 24719, - "unaffected": 24720, - "##sius": 24721, - "annabelle": 24722, - "flopped": 24723, - "sloping": 24724, - "##litz": 24725, - "dreamer": 24726, - "interceptor": 24727, - "vu": 24728, - "##loh": 24729, - "consecration": 24730, - "copying": 24731, - "messaging": 24732, - "breaker": 24733, - "climates": 24734, - "hospitalized": 24735, - "1752": 24736, - "torino": 24737, - "afternoons": 24738, - "winfield": 24739, - "witnessing": 24740, - "##teacher": 24741, - "breakers": 24742, - "choirs": 24743, - "sawmill": 24744, - "coldly": 24745, - "##ege": 24746, - "sipping": 24747, - "haste": 24748, - "uninhabited": 24749, - "conical": 24750, - "bibliography": 24751, - "pamphlets": 24752, - "severn": 24753, - "edict": 24754, - "##oca": 24755, - "deux": 24756, - "illnesses": 24757, - "grips": 24758, - "##pl": 24759, - "rehearsals": 24760, - "sis": 24761, - "thinkers": 24762, - "tame": 24763, - "##keepers": 24764, - "1690": 24765, - "acacia": 24766, - "reformer": 24767, - "##osed": 24768, - "##rys": 24769, - "shuffling": 24770, - "##iring": 24771, - "##shima": 24772, - "eastbound": 24773, - "ionic": 24774, - "rhea": 24775, - "flees": 24776, - "littered": 24777, - "##oum": 24778, - "rocker": 24779, - "vomiting": 24780, - "groaning": 24781, - "champ": 24782, - "overwhelmingly": 24783, - "civilizations": 24784, - "paces": 24785, - "sloop": 24786, - "adoptive": 24787, - "##tish": 24788, - "skaters": 24789, - "##vres": 24790, - "aiding": 24791, - "mango": 24792, - "##joy": 24793, - "nikola": 24794, - "shriek": 24795, - "##ignon": 24796, - "pharmaceuticals": 24797, - "##mg": 24798, - "tuna": 24799, - "calvert": 24800, - "gustavo": 24801, - "stocked": 24802, - "yearbook": 24803, - "##urai": 24804, - "##mana": 24805, - "computed": 24806, - "subsp": 24807, - "riff": 24808, - "hanoi": 24809, - "kelvin": 24810, - "hamid": 24811, - "moors": 24812, - "pastures": 24813, - "summons": 24814, - "jihad": 24815, - "nectar": 24816, - "##ctors": 24817, - "bayou": 24818, - "untitled": 24819, - "pleasing": 24820, - "vastly": 24821, - "republics": 24822, - "intellect": 24823, - "##η": 24824, - "##ulio": 24825, - "##tou": 24826, - "crumbling": 24827, - "stylistic": 24828, - "sb": 24829, - "##ی": 24830, - "consolation": 24831, - "frequented": 24832, - "h₂o": 24833, - "walden": 24834, - "widows": 24835, - "##iens": 24836, - "404": 24837, - "##ignment": 24838, - "chunks": 24839, - "improves": 24840, - "288": 24841, - "grit": 24842, - "recited": 24843, - "##dev": 24844, - "snarl": 24845, - "sociological": 24846, - "##arte": 24847, - "##gul": 24848, - "inquired": 24849, - "##held": 24850, - "bruise": 24851, - "clube": 24852, - "consultancy": 24853, - "homogeneous": 24854, - "hornets": 24855, - "multiplication": 24856, - "pasta": 24857, - "prick": 24858, - "savior": 24859, - "##grin": 24860, - "##kou": 24861, - "##phile": 24862, - "yoon": 24863, - "##gara": 24864, - "grimes": 24865, - "vanishing": 24866, - "cheering": 24867, - "reacting": 24868, - "bn": 24869, - "distillery": 24870, - "##quisite": 24871, - "##vity": 24872, - "coe": 24873, - "dockyard": 24874, - "massif": 24875, - "##jord": 24876, - "escorts": 24877, - "voss": 24878, - "##valent": 24879, - "byte": 24880, - "chopped": 
24881, - "hawke": 24882, - "illusions": 24883, - "workings": 24884, - "floats": 24885, - "##koto": 24886, - "##vac": 24887, - "kv": 24888, - "annapolis": 24889, - "madden": 24890, - "##onus": 24891, - "alvaro": 24892, - "noctuidae": 24893, - "##cum": 24894, - "##scopic": 24895, - "avenge": 24896, - "steamboat": 24897, - "forte": 24898, - "illustrates": 24899, - "erika": 24900, - "##trip": 24901, - "570": 24902, - "dew": 24903, - "nationalities": 24904, - "bran": 24905, - "manifested": 24906, - "thirsty": 24907, - "diversified": 24908, - "muscled": 24909, - "reborn": 24910, - "##standing": 24911, - "arson": 24912, - "##lessness": 24913, - "##dran": 24914, - "##logram": 24915, - "##boys": 24916, - "##kushima": 24917, - "##vious": 24918, - "willoughby": 24919, - "##phobia": 24920, - "286": 24921, - "alsace": 24922, - "dashboard": 24923, - "yuki": 24924, - "##chai": 24925, - "granville": 24926, - "myspace": 24927, - "publicized": 24928, - "tricked": 24929, - "##gang": 24930, - "adjective": 24931, - "##ater": 24932, - "relic": 24933, - "reorganisation": 24934, - "enthusiastically": 24935, - "indications": 24936, - "saxe": 24937, - "##lassified": 24938, - "consolidate": 24939, - "iec": 24940, - "padua": 24941, - "helplessly": 24942, - "ramps": 24943, - "renaming": 24944, - "regulars": 24945, - "pedestrians": 24946, - "accents": 24947, - "convicts": 24948, - "inaccurate": 24949, - "lowers": 24950, - "mana": 24951, - "##pati": 24952, - "barrie": 24953, - "bjp": 24954, - "outta": 24955, - "someplace": 24956, - "berwick": 24957, - "flanking": 24958, - "invoked": 24959, - "marrow": 24960, - "sparsely": 24961, - "excerpts": 24962, - "clothed": 24963, - "rei": 24964, - "##ginal": 24965, - "wept": 24966, - "##straße": 24967, - "##vish": 24968, - "alexa": 24969, - "excel": 24970, - "##ptive": 24971, - "membranes": 24972, - "aquitaine": 24973, - "creeks": 24974, - "cutler": 24975, - "sheppard": 24976, - "implementations": 24977, - "ns": 24978, - "##dur": 24979, - "fragrance": 24980, - "budge": 24981, - "concordia": 24982, - "magnesium": 24983, - "marcelo": 24984, - "##antes": 24985, - "gladly": 24986, - "vibrating": 24987, - "##rral": 24988, - "##ggles": 24989, - "montrose": 24990, - "##omba": 24991, - "lew": 24992, - "seamus": 24993, - "1630": 24994, - "cocky": 24995, - "##ament": 24996, - "##uen": 24997, - "bjorn": 24998, - "##rrick": 24999, - "fielder": 25000, - "fluttering": 25001, - "##lase": 25002, - "methyl": 25003, - "kimberley": 25004, - "mcdowell": 25005, - "reductions": 25006, - "barbed": 25007, - "##jic": 25008, - "##tonic": 25009, - "aeronautical": 25010, - "condensed": 25011, - "distracting": 25012, - "##promising": 25013, - "huffed": 25014, - "##cala": 25015, - "##sle": 25016, - "claudius": 25017, - "invincible": 25018, - "missy": 25019, - "pious": 25020, - "balthazar": 25021, - "ci": 25022, - "##lang": 25023, - "butte": 25024, - "combo": 25025, - "orson": 25026, - "##dication": 25027, - "myriad": 25028, - "1707": 25029, - "silenced": 25030, - "##fed": 25031, - "##rh": 25032, - "coco": 25033, - "netball": 25034, - "yourselves": 25035, - "##oza": 25036, - "clarify": 25037, - "heller": 25038, - "peg": 25039, - "durban": 25040, - "etudes": 25041, - "offender": 25042, - "roast": 25043, - "blackmail": 25044, - "curvature": 25045, - "##woods": 25046, - "vile": 25047, - "309": 25048, - "illicit": 25049, - "suriname": 25050, - "##linson": 25051, - "overture": 25052, - "1685": 25053, - "bubbling": 25054, - "gymnast": 25055, - "tucking": 25056, - "##mming": 25057, - "##ouin": 25058, - "maldives": 
25059, - "##bala": 25060, - "gurney": 25061, - "##dda": 25062, - "##eased": 25063, - "##oides": 25064, - "backside": 25065, - "pinto": 25066, - "jars": 25067, - "racehorse": 25068, - "tending": 25069, - "##rdial": 25070, - "baronetcy": 25071, - "wiener": 25072, - "duly": 25073, - "##rke": 25074, - "barbarian": 25075, - "cupping": 25076, - "flawed": 25077, - "##thesis": 25078, - "bertha": 25079, - "pleistocene": 25080, - "puddle": 25081, - "swearing": 25082, - "##nob": 25083, - "##tically": 25084, - "fleeting": 25085, - "prostate": 25086, - "amulet": 25087, - "educating": 25088, - "##mined": 25089, - "##iti": 25090, - "##tler": 25091, - "75th": 25092, - "jens": 25093, - "respondents": 25094, - "analytics": 25095, - "cavaliers": 25096, - "papacy": 25097, - "raju": 25098, - "##iente": 25099, - "##ulum": 25100, - "##tip": 25101, - "funnel": 25102, - "271": 25103, - "disneyland": 25104, - "##lley": 25105, - "sociologist": 25106, - "##iam": 25107, - "2500": 25108, - "faulkner": 25109, - "louvre": 25110, - "menon": 25111, - "##dson": 25112, - "276": 25113, - "##ower": 25114, - "afterlife": 25115, - "mannheim": 25116, - "peptide": 25117, - "referees": 25118, - "comedians": 25119, - "meaningless": 25120, - "##anger": 25121, - "##laise": 25122, - "fabrics": 25123, - "hurley": 25124, - "renal": 25125, - "sleeps": 25126, - "##bour": 25127, - "##icle": 25128, - "breakout": 25129, - "kristin": 25130, - "roadside": 25131, - "animator": 25132, - "clover": 25133, - "disdain": 25134, - "unsafe": 25135, - "redesign": 25136, - "##urity": 25137, - "firth": 25138, - "barnsley": 25139, - "portage": 25140, - "reset": 25141, - "narrows": 25142, - "268": 25143, - "commandos": 25144, - "expansive": 25145, - "speechless": 25146, - "tubular": 25147, - "##lux": 25148, - "essendon": 25149, - "eyelashes": 25150, - "smashwords": 25151, - "##yad": 25152, - "##bang": 25153, - "##claim": 25154, - "craved": 25155, - "sprinted": 25156, - "chet": 25157, - "somme": 25158, - "astor": 25159, - "wrocław": 25160, - "orton": 25161, - "266": 25162, - "bane": 25163, - "##erving": 25164, - "##uing": 25165, - "mischief": 25166, - "##amps": 25167, - "##sund": 25168, - "scaling": 25169, - "terre": 25170, - "##xious": 25171, - "impairment": 25172, - "offenses": 25173, - "undermine": 25174, - "moi": 25175, - "soy": 25176, - "contiguous": 25177, - "arcadia": 25178, - "inuit": 25179, - "seam": 25180, - "##tops": 25181, - "macbeth": 25182, - "rebelled": 25183, - "##icative": 25184, - "##iot": 25185, - "590": 25186, - "elaborated": 25187, - "frs": 25188, - "uniformed": 25189, - "##dberg": 25190, - "259": 25191, - "powerless": 25192, - "priscilla": 25193, - "stimulated": 25194, - "980": 25195, - "qc": 25196, - "arboretum": 25197, - "frustrating": 25198, - "trieste": 25199, - "bullock": 25200, - "##nified": 25201, - "enriched": 25202, - "glistening": 25203, - "intern": 25204, - "##adia": 25205, - "locus": 25206, - "nouvelle": 25207, - "ollie": 25208, - "ike": 25209, - "lash": 25210, - "starboard": 25211, - "ee": 25212, - "tapestry": 25213, - "headlined": 25214, - "hove": 25215, - "rigged": 25216, - "##vite": 25217, - "pollock": 25218, - "##yme": 25219, - "thrive": 25220, - "clustered": 25221, - "cas": 25222, - "roi": 25223, - "gleamed": 25224, - "olympiad": 25225, - "##lino": 25226, - "pressured": 25227, - "regimes": 25228, - "##hosis": 25229, - "##lick": 25230, - "ripley": 25231, - "##ophone": 25232, - "kickoff": 25233, - "gallon": 25234, - "rockwell": 25235, - "##arable": 25236, - "crusader": 25237, - "glue": 25238, - "revolutions": 25239, - 
"scrambling": 25240, - "1714": 25241, - "grover": 25242, - "##jure": 25243, - "englishman": 25244, - "aztec": 25245, - "263": 25246, - "contemplating": 25247, - "coven": 25248, - "ipad": 25249, - "preach": 25250, - "triumphant": 25251, - "tufts": 25252, - "##esian": 25253, - "rotational": 25254, - "##phus": 25255, - "328": 25256, - "falkland": 25257, - "##brates": 25258, - "strewn": 25259, - "clarissa": 25260, - "rejoin": 25261, - "environmentally": 25262, - "glint": 25263, - "banded": 25264, - "drenched": 25265, - "moat": 25266, - "albanians": 25267, - "johor": 25268, - "rr": 25269, - "maestro": 25270, - "malley": 25271, - "nouveau": 25272, - "shaded": 25273, - "taxonomy": 25274, - "v6": 25275, - "adhere": 25276, - "bunk": 25277, - "airfields": 25278, - "##ritan": 25279, - "1741": 25280, - "encompass": 25281, - "remington": 25282, - "tran": 25283, - "##erative": 25284, - "amelie": 25285, - "mazda": 25286, - "friar": 25287, - "morals": 25288, - "passions": 25289, - "##zai": 25290, - "breadth": 25291, - "vis": 25292, - "##hae": 25293, - "argus": 25294, - "burnham": 25295, - "caressing": 25296, - "insider": 25297, - "rudd": 25298, - "##imov": 25299, - "##mini": 25300, - "##rso": 25301, - "italianate": 25302, - "murderous": 25303, - "textual": 25304, - "wainwright": 25305, - "armada": 25306, - "bam": 25307, - "weave": 25308, - "timer": 25309, - "##taken": 25310, - "##nh": 25311, - "fra": 25312, - "##crest": 25313, - "ardent": 25314, - "salazar": 25315, - "taps": 25316, - "tunis": 25317, - "##ntino": 25318, - "allegro": 25319, - "gland": 25320, - "philanthropic": 25321, - "##chester": 25322, - "implication": 25323, - "##optera": 25324, - "esq": 25325, - "judas": 25326, - "noticeably": 25327, - "wynn": 25328, - "##dara": 25329, - "inched": 25330, - "indexed": 25331, - "crises": 25332, - "villiers": 25333, - "bandit": 25334, - "royalties": 25335, - "patterned": 25336, - "cupboard": 25337, - "interspersed": 25338, - "accessory": 25339, - "isla": 25340, - "kendrick": 25341, - "entourage": 25342, - "stitches": 25343, - "##esthesia": 25344, - "headwaters": 25345, - "##ior": 25346, - "interlude": 25347, - "distraught": 25348, - "draught": 25349, - "1727": 25350, - "##basket": 25351, - "biased": 25352, - "sy": 25353, - "transient": 25354, - "triad": 25355, - "subgenus": 25356, - "adapting": 25357, - "kidd": 25358, - "shortstop": 25359, - "##umatic": 25360, - "dimly": 25361, - "spiked": 25362, - "mcleod": 25363, - "reprint": 25364, - "nellie": 25365, - "pretoria": 25366, - "windmill": 25367, - "##cek": 25368, - "singled": 25369, - "##mps": 25370, - "273": 25371, - "reunite": 25372, - "##orous": 25373, - "747": 25374, - "bankers": 25375, - "outlying": 25376, - "##omp": 25377, - "##ports": 25378, - "##tream": 25379, - "apologies": 25380, - "cosmetics": 25381, - "patsy": 25382, - "##deh": 25383, - "##ocks": 25384, - "##yson": 25385, - "bender": 25386, - "nantes": 25387, - "serene": 25388, - "##nad": 25389, - "lucha": 25390, - "mmm": 25391, - "323": 25392, - "##cius": 25393, - "##gli": 25394, - "cmll": 25395, - "coinage": 25396, - "nestor": 25397, - "juarez": 25398, - "##rook": 25399, - "smeared": 25400, - "sprayed": 25401, - "twitching": 25402, - "sterile": 25403, - "irina": 25404, - "embodied": 25405, - "juveniles": 25406, - "enveloped": 25407, - "miscellaneous": 25408, - "cancers": 25409, - "dq": 25410, - "gulped": 25411, - "luisa": 25412, - "crested": 25413, - "swat": 25414, - "donegal": 25415, - "ref": 25416, - "##anov": 25417, - "##acker": 25418, - "hearst": 25419, - "mercantile": 25420, - "##lika": 
25421, - "doorbell": 25422, - "ua": 25423, - "vicki": 25424, - "##alla": 25425, - "##som": 25426, - "bilbao": 25427, - "psychologists": 25428, - "stryker": 25429, - "sw": 25430, - "horsemen": 25431, - "turkmenistan": 25432, - "wits": 25433, - "##national": 25434, - "anson": 25435, - "mathew": 25436, - "screenings": 25437, - "##umb": 25438, - "rihanna": 25439, - "##agne": 25440, - "##nessy": 25441, - "aisles": 25442, - "##iani": 25443, - "##osphere": 25444, - "hines": 25445, - "kenton": 25446, - "saskatoon": 25447, - "tasha": 25448, - "truncated": 25449, - "##champ": 25450, - "##itan": 25451, - "mildred": 25452, - "advises": 25453, - "fredrik": 25454, - "interpreting": 25455, - "inhibitors": 25456, - "##athi": 25457, - "spectroscopy": 25458, - "##hab": 25459, - "##kong": 25460, - "karim": 25461, - "panda": 25462, - "##oia": 25463, - "##nail": 25464, - "##vc": 25465, - "conqueror": 25466, - "kgb": 25467, - "leukemia": 25468, - "##dity": 25469, - "arrivals": 25470, - "cheered": 25471, - "pisa": 25472, - "phosphorus": 25473, - "shielded": 25474, - "##riated": 25475, - "mammal": 25476, - "unitarian": 25477, - "urgently": 25478, - "chopin": 25479, - "sanitary": 25480, - "##mission": 25481, - "spicy": 25482, - "drugged": 25483, - "hinges": 25484, - "##tort": 25485, - "tipping": 25486, - "trier": 25487, - "impoverished": 25488, - "westchester": 25489, - "##caster": 25490, - "267": 25491, - "epoch": 25492, - "nonstop": 25493, - "##gman": 25494, - "##khov": 25495, - "aromatic": 25496, - "centrally": 25497, - "cerro": 25498, - "##tively": 25499, - "##vio": 25500, - "billions": 25501, - "modulation": 25502, - "sedimentary": 25503, - "283": 25504, - "facilitating": 25505, - "outrageous": 25506, - "goldstein": 25507, - "##eak": 25508, - "##kt": 25509, - "ld": 25510, - "maitland": 25511, - "penultimate": 25512, - "pollard": 25513, - "##dance": 25514, - "fleets": 25515, - "spaceship": 25516, - "vertebrae": 25517, - "##nig": 25518, - "alcoholism": 25519, - "als": 25520, - "recital": 25521, - "##bham": 25522, - "##ference": 25523, - "##omics": 25524, - "m2": 25525, - "##bm": 25526, - "trois": 25527, - "##tropical": 25528, - "##в": 25529, - "commemorates": 25530, - "##meric": 25531, - "marge": 25532, - "##raction": 25533, - "1643": 25534, - "670": 25535, - "cosmetic": 25536, - "ravaged": 25537, - "##ige": 25538, - "catastrophe": 25539, - "eng": 25540, - "##shida": 25541, - "albrecht": 25542, - "arterial": 25543, - "bellamy": 25544, - "decor": 25545, - "harmon": 25546, - "##rde": 25547, - "bulbs": 25548, - "synchronized": 25549, - "vito": 25550, - "easiest": 25551, - "shetland": 25552, - "shielding": 25553, - "wnba": 25554, - "##glers": 25555, - "##ssar": 25556, - "##riam": 25557, - "brianna": 25558, - "cumbria": 25559, - "##aceous": 25560, - "##rard": 25561, - "cores": 25562, - "thayer": 25563, - "##nsk": 25564, - "brood": 25565, - "hilltop": 25566, - "luminous": 25567, - "carts": 25568, - "keynote": 25569, - "larkin": 25570, - "logos": 25571, - "##cta": 25572, - "##ا": 25573, - "##mund": 25574, - "##quay": 25575, - "lilith": 25576, - "tinted": 25577, - "277": 25578, - "wrestle": 25579, - "mobilization": 25580, - "##uses": 25581, - "sequential": 25582, - "siam": 25583, - "bloomfield": 25584, - "takahashi": 25585, - "274": 25586, - "##ieving": 25587, - "presenters": 25588, - "ringo": 25589, - "blazed": 25590, - "witty": 25591, - "##oven": 25592, - "##ignant": 25593, - "devastation": 25594, - "haydn": 25595, - "harmed": 25596, - "newt": 25597, - "therese": 25598, - "##peed": 25599, - "gershwin": 25600, - 
"molina": 25601, - "rabbis": 25602, - "sudanese": 25603, - "001": 25604, - "innate": 25605, - "restarted": 25606, - "##sack": 25607, - "##fus": 25608, - "slices": 25609, - "wb": 25610, - "##shah": 25611, - "enroll": 25612, - "hypothetical": 25613, - "hysterical": 25614, - "1743": 25615, - "fabio": 25616, - "indefinite": 25617, - "warped": 25618, - "##hg": 25619, - "exchanging": 25620, - "525": 25621, - "unsuitable": 25622, - "##sboro": 25623, - "gallo": 25624, - "1603": 25625, - "bret": 25626, - "cobalt": 25627, - "homemade": 25628, - "##hunter": 25629, - "mx": 25630, - "operatives": 25631, - "##dhar": 25632, - "terraces": 25633, - "durable": 25634, - "latch": 25635, - "pens": 25636, - "whorls": 25637, - "##ctuated": 25638, - "##eaux": 25639, - "billing": 25640, - "ligament": 25641, - "succumbed": 25642, - "##gly": 25643, - "regulators": 25644, - "spawn": 25645, - "##brick": 25646, - "##stead": 25647, - "filmfare": 25648, - "rochelle": 25649, - "##nzo": 25650, - "1725": 25651, - "circumstance": 25652, - "saber": 25653, - "supplements": 25654, - "##nsky": 25655, - "##tson": 25656, - "crowe": 25657, - "wellesley": 25658, - "carrot": 25659, - "##9th": 25660, - "##movable": 25661, - "primate": 25662, - "drury": 25663, - "sincerely": 25664, - "topical": 25665, - "##mad": 25666, - "##rao": 25667, - "callahan": 25668, - "kyiv": 25669, - "smarter": 25670, - "tits": 25671, - "undo": 25672, - "##yeh": 25673, - "announcements": 25674, - "anthologies": 25675, - "barrio": 25676, - "nebula": 25677, - "##islaus": 25678, - "##shaft": 25679, - "##tyn": 25680, - "bodyguards": 25681, - "2021": 25682, - "assassinate": 25683, - "barns": 25684, - "emmett": 25685, - "scully": 25686, - "##mah": 25687, - "##yd": 25688, - "##eland": 25689, - "##tino": 25690, - "##itarian": 25691, - "demoted": 25692, - "gorman": 25693, - "lashed": 25694, - "prized": 25695, - "adventist": 25696, - "writ": 25697, - "##gui": 25698, - "alla": 25699, - "invertebrates": 25700, - "##ausen": 25701, - "1641": 25702, - "amman": 25703, - "1742": 25704, - "align": 25705, - "healy": 25706, - "redistribution": 25707, - "##gf": 25708, - "##rize": 25709, - "insulation": 25710, - "##drop": 25711, - "adherents": 25712, - "hezbollah": 25713, - "vitro": 25714, - "ferns": 25715, - "yanking": 25716, - "269": 25717, - "php": 25718, - "registering": 25719, - "uppsala": 25720, - "cheerleading": 25721, - "confines": 25722, - "mischievous": 25723, - "tully": 25724, - "##ross": 25725, - "49th": 25726, - "docked": 25727, - "roam": 25728, - "stipulated": 25729, - "pumpkin": 25730, - "##bry": 25731, - "prompt": 25732, - "##ezer": 25733, - "blindly": 25734, - "shuddering": 25735, - "craftsmen": 25736, - "frail": 25737, - "scented": 25738, - "katharine": 25739, - "scramble": 25740, - "shaggy": 25741, - "sponge": 25742, - "helix": 25743, - "zaragoza": 25744, - "279": 25745, - "##52": 25746, - "43rd": 25747, - "backlash": 25748, - "fontaine": 25749, - "seizures": 25750, - "posse": 25751, - "cowan": 25752, - "nonfiction": 25753, - "telenovela": 25754, - "wwii": 25755, - "hammered": 25756, - "undone": 25757, - "##gpur": 25758, - "encircled": 25759, - "irs": 25760, - "##ivation": 25761, - "artefacts": 25762, - "oneself": 25763, - "searing": 25764, - "smallpox": 25765, - "##belle": 25766, - "##osaurus": 25767, - "shandong": 25768, - "breached": 25769, - "upland": 25770, - "blushing": 25771, - "rankin": 25772, - "infinitely": 25773, - "psyche": 25774, - "tolerated": 25775, - "docking": 25776, - "evicted": 25777, - "##col": 25778, - "unmarked": 25779, - "##lving": 25780, - 
"gnome": 25781, - "lettering": 25782, - "litres": 25783, - "musique": 25784, - "##oint": 25785, - "benevolent": 25786, - "##jal": 25787, - "blackened": 25788, - "##anna": 25789, - "mccall": 25790, - "racers": 25791, - "tingle": 25792, - "##ocene": 25793, - "##orestation": 25794, - "introductions": 25795, - "radically": 25796, - "292": 25797, - "##hiff": 25798, - "##باد": 25799, - "1610": 25800, - "1739": 25801, - "munchen": 25802, - "plead": 25803, - "##nka": 25804, - "condo": 25805, - "scissors": 25806, - "##sight": 25807, - "##tens": 25808, - "apprehension": 25809, - "##cey": 25810, - "##yin": 25811, - "hallmark": 25812, - "watering": 25813, - "formulas": 25814, - "sequels": 25815, - "##llas": 25816, - "aggravated": 25817, - "bae": 25818, - "commencing": 25819, - "##building": 25820, - "enfield": 25821, - "prohibits": 25822, - "marne": 25823, - "vedic": 25824, - "civilized": 25825, - "euclidean": 25826, - "jagger": 25827, - "beforehand": 25828, - "blasts": 25829, - "dumont": 25830, - "##arney": 25831, - "##nem": 25832, - "740": 25833, - "conversions": 25834, - "hierarchical": 25835, - "rios": 25836, - "simulator": 25837, - "##dya": 25838, - "##lellan": 25839, - "hedges": 25840, - "oleg": 25841, - "thrusts": 25842, - "shadowed": 25843, - "darby": 25844, - "maximize": 25845, - "1744": 25846, - "gregorian": 25847, - "##nded": 25848, - "##routed": 25849, - "sham": 25850, - "unspecified": 25851, - "##hog": 25852, - "emory": 25853, - "factual": 25854, - "##smo": 25855, - "##tp": 25856, - "fooled": 25857, - "##rger": 25858, - "ortega": 25859, - "wellness": 25860, - "marlon": 25861, - "##oton": 25862, - "##urance": 25863, - "casket": 25864, - "keating": 25865, - "ley": 25866, - "enclave": 25867, - "##ayan": 25868, - "char": 25869, - "influencing": 25870, - "jia": 25871, - "##chenko": 25872, - "412": 25873, - "ammonia": 25874, - "erebidae": 25875, - "incompatible": 25876, - "violins": 25877, - "cornered": 25878, - "##arat": 25879, - "grooves": 25880, - "astronauts": 25881, - "columbian": 25882, - "rampant": 25883, - "fabrication": 25884, - "kyushu": 25885, - "mahmud": 25886, - "vanish": 25887, - "##dern": 25888, - "mesopotamia": 25889, - "##lete": 25890, - "ict": 25891, - "##rgen": 25892, - "caspian": 25893, - "kenji": 25894, - "pitted": 25895, - "##vered": 25896, - "999": 25897, - "grimace": 25898, - "roanoke": 25899, - "tchaikovsky": 25900, - "twinned": 25901, - "##analysis": 25902, - "##awan": 25903, - "xinjiang": 25904, - "arias": 25905, - "clemson": 25906, - "kazakh": 25907, - "sizable": 25908, - "1662": 25909, - "##khand": 25910, - "##vard": 25911, - "plunge": 25912, - "tatum": 25913, - "vittorio": 25914, - "##nden": 25915, - "cholera": 25916, - "##dana": 25917, - "##oper": 25918, - "bracing": 25919, - "indifference": 25920, - "projectile": 25921, - "superliga": 25922, - "##chee": 25923, - "realises": 25924, - "upgrading": 25925, - "299": 25926, - "porte": 25927, - "retribution": 25928, - "##vies": 25929, - "nk": 25930, - "stil": 25931, - "##resses": 25932, - "ama": 25933, - "bureaucracy": 25934, - "blackberry": 25935, - "bosch": 25936, - "testosterone": 25937, - "collapses": 25938, - "greer": 25939, - "##pathic": 25940, - "ioc": 25941, - "fifties": 25942, - "malls": 25943, - "##erved": 25944, - "bao": 25945, - "baskets": 25946, - "adolescents": 25947, - "siegfried": 25948, - "##osity": 25949, - "##tosis": 25950, - "mantra": 25951, - "detecting": 25952, - "existent": 25953, - "fledgling": 25954, - "##cchi": 25955, - "dissatisfied": 25956, - "gan": 25957, - "telecommunication": 25958, - 
"mingled": 25959, - "sobbed": 25960, - "6000": 25961, - "controversies": 25962, - "outdated": 25963, - "taxis": 25964, - "##raus": 25965, - "fright": 25966, - "slams": 25967, - "##lham": 25968, - "##fect": 25969, - "##tten": 25970, - "detectors": 25971, - "fetal": 25972, - "tanned": 25973, - "##uw": 25974, - "fray": 25975, - "goth": 25976, - "olympian": 25977, - "skipping": 25978, - "mandates": 25979, - "scratches": 25980, - "sheng": 25981, - "unspoken": 25982, - "hyundai": 25983, - "tracey": 25984, - "hotspur": 25985, - "restrictive": 25986, - "##buch": 25987, - "americana": 25988, - "mundo": 25989, - "##bari": 25990, - "burroughs": 25991, - "diva": 25992, - "vulcan": 25993, - "##6th": 25994, - "distinctions": 25995, - "thumping": 25996, - "##ngen": 25997, - "mikey": 25998, - "sheds": 25999, - "fide": 26000, - "rescues": 26001, - "springsteen": 26002, - "vested": 26003, - "valuation": 26004, - "##ece": 26005, - "##ely": 26006, - "pinnacle": 26007, - "rake": 26008, - "sylvie": 26009, - "##edo": 26010, - "almond": 26011, - "quivering": 26012, - "##irus": 26013, - "alteration": 26014, - "faltered": 26015, - "##wad": 26016, - "51st": 26017, - "hydra": 26018, - "ticked": 26019, - "##kato": 26020, - "recommends": 26021, - "##dicated": 26022, - "antigua": 26023, - "arjun": 26024, - "stagecoach": 26025, - "wilfred": 26026, - "trickle": 26027, - "pronouns": 26028, - "##pon": 26029, - "aryan": 26030, - "nighttime": 26031, - "##anian": 26032, - "gall": 26033, - "pea": 26034, - "stitch": 26035, - "##hei": 26036, - "leung": 26037, - "milos": 26038, - "##dini": 26039, - "eritrea": 26040, - "nexus": 26041, - "starved": 26042, - "snowfall": 26043, - "kant": 26044, - "parasitic": 26045, - "cot": 26046, - "discus": 26047, - "hana": 26048, - "strikers": 26049, - "appleton": 26050, - "kitchens": 26051, - "##erina": 26052, - "##partisan": 26053, - "##itha": 26054, - "##vius": 26055, - "disclose": 26056, - "metis": 26057, - "##channel": 26058, - "1701": 26059, - "tesla": 26060, - "##vera": 26061, - "fitch": 26062, - "1735": 26063, - "blooded": 26064, - "##tila": 26065, - "decimal": 26066, - "##tang": 26067, - "##bai": 26068, - "cyclones": 26069, - "eun": 26070, - "bottled": 26071, - "peas": 26072, - "pensacola": 26073, - "basha": 26074, - "bolivian": 26075, - "crabs": 26076, - "boil": 26077, - "lanterns": 26078, - "partridge": 26079, - "roofed": 26080, - "1645": 26081, - "necks": 26082, - "##phila": 26083, - "opined": 26084, - "patting": 26085, - "##kla": 26086, - "##lland": 26087, - "chuckles": 26088, - "volta": 26089, - "whereupon": 26090, - "##nche": 26091, - "devout": 26092, - "euroleague": 26093, - "suicidal": 26094, - "##dee": 26095, - "inherently": 26096, - "involuntary": 26097, - "knitting": 26098, - "nasser": 26099, - "##hide": 26100, - "puppets": 26101, - "colourful": 26102, - "courageous": 26103, - "southend": 26104, - "stills": 26105, - "miraculous": 26106, - "hodgson": 26107, - "richer": 26108, - "rochdale": 26109, - "ethernet": 26110, - "greta": 26111, - "uniting": 26112, - "prism": 26113, - "umm": 26114, - "##haya": 26115, - "##itical": 26116, - "##utation": 26117, - "deterioration": 26118, - "pointe": 26119, - "prowess": 26120, - "##ropriation": 26121, - "lids": 26122, - "scranton": 26123, - "billings": 26124, - "subcontinent": 26125, - "##koff": 26126, - "##scope": 26127, - "brute": 26128, - "kellogg": 26129, - "psalms": 26130, - "degraded": 26131, - "##vez": 26132, - "stanisław": 26133, - "##ructured": 26134, - "ferreira": 26135, - "pun": 26136, - "astonishing": 26137, - "gunnar": 26138, - 
"##yat": 26139, - "arya": 26140, - "prc": 26141, - "gottfried": 26142, - "##tight": 26143, - "excursion": 26144, - "##ographer": 26145, - "dina": 26146, - "##quil": 26147, - "##nare": 26148, - "huffington": 26149, - "illustrious": 26150, - "wilbur": 26151, - "gundam": 26152, - "verandah": 26153, - "##zard": 26154, - "naacp": 26155, - "##odle": 26156, - "constructive": 26157, - "fjord": 26158, - "kade": 26159, - "##naud": 26160, - "generosity": 26161, - "thrilling": 26162, - "baseline": 26163, - "cayman": 26164, - "frankish": 26165, - "plastics": 26166, - "accommodations": 26167, - "zoological": 26168, - "##fting": 26169, - "cedric": 26170, - "qb": 26171, - "motorized": 26172, - "##dome": 26173, - "##otted": 26174, - "squealed": 26175, - "tackled": 26176, - "canucks": 26177, - "budgets": 26178, - "situ": 26179, - "asthma": 26180, - "dail": 26181, - "gabled": 26182, - "grasslands": 26183, - "whimpered": 26184, - "writhing": 26185, - "judgments": 26186, - "##65": 26187, - "minnie": 26188, - "pv": 26189, - "##carbon": 26190, - "bananas": 26191, - "grille": 26192, - "domes": 26193, - "monique": 26194, - "odin": 26195, - "maguire": 26196, - "markham": 26197, - "tierney": 26198, - "##estra": 26199, - "##chua": 26200, - "libel": 26201, - "poke": 26202, - "speedy": 26203, - "atrium": 26204, - "laval": 26205, - "notwithstanding": 26206, - "##edly": 26207, - "fai": 26208, - "kala": 26209, - "##sur": 26210, - "robb": 26211, - "##sma": 26212, - "listings": 26213, - "luz": 26214, - "supplementary": 26215, - "tianjin": 26216, - "##acing": 26217, - "enzo": 26218, - "jd": 26219, - "ric": 26220, - "scanner": 26221, - "croats": 26222, - "transcribed": 26223, - "##49": 26224, - "arden": 26225, - "cv": 26226, - "##hair": 26227, - "##raphy": 26228, - "##lver": 26229, - "##uy": 26230, - "357": 26231, - "seventies": 26232, - "staggering": 26233, - "alam": 26234, - "horticultural": 26235, - "hs": 26236, - "regression": 26237, - "timbers": 26238, - "blasting": 26239, - "##ounded": 26240, - "montagu": 26241, - "manipulating": 26242, - "##cit": 26243, - "catalytic": 26244, - "1550": 26245, - "troopers": 26246, - "##meo": 26247, - "condemnation": 26248, - "fitzpatrick": 26249, - "##oire": 26250, - "##roved": 26251, - "inexperienced": 26252, - "1670": 26253, - "castes": 26254, - "##lative": 26255, - "outing": 26256, - "314": 26257, - "dubois": 26258, - "flicking": 26259, - "quarrel": 26260, - "ste": 26261, - "learners": 26262, - "1625": 26263, - "iq": 26264, - "whistled": 26265, - "##class": 26266, - "282": 26267, - "classify": 26268, - "tariffs": 26269, - "temperament": 26270, - "355": 26271, - "folly": 26272, - "liszt": 26273, - "##yles": 26274, - "immersed": 26275, - "jordanian": 26276, - "ceasefire": 26277, - "apparel": 26278, - "extras": 26279, - "maru": 26280, - "fished": 26281, - "##bio": 26282, - "harta": 26283, - "stockport": 26284, - "assortment": 26285, - "craftsman": 26286, - "paralysis": 26287, - "transmitters": 26288, - "##cola": 26289, - "blindness": 26290, - "##wk": 26291, - "fatally": 26292, - "proficiency": 26293, - "solemnly": 26294, - "##orno": 26295, - "repairing": 26296, - "amore": 26297, - "groceries": 26298, - "ultraviolet": 26299, - "##chase": 26300, - "schoolhouse": 26301, - "##tua": 26302, - "resurgence": 26303, - "nailed": 26304, - "##otype": 26305, - "##×": 26306, - "ruse": 26307, - "saliva": 26308, - "diagrams": 26309, - "##tructing": 26310, - "albans": 26311, - "rann": 26312, - "thirties": 26313, - "1b": 26314, - "antennas": 26315, - "hilarious": 26316, - "cougars": 26317, - "paddington": 
26318, - "stats": 26319, - "##eger": 26320, - "breakaway": 26321, - "ipod": 26322, - "reza": 26323, - "authorship": 26324, - "prohibiting": 26325, - "scoffed": 26326, - "##etz": 26327, - "##ttle": 26328, - "conscription": 26329, - "defected": 26330, - "trondheim": 26331, - "##fires": 26332, - "ivanov": 26333, - "keenan": 26334, - "##adan": 26335, - "##ciful": 26336, - "##fb": 26337, - "##slow": 26338, - "locating": 26339, - "##ials": 26340, - "##tford": 26341, - "cadiz": 26342, - "basalt": 26343, - "blankly": 26344, - "interned": 26345, - "rags": 26346, - "rattling": 26347, - "##tick": 26348, - "carpathian": 26349, - "reassured": 26350, - "sync": 26351, - "bum": 26352, - "guildford": 26353, - "iss": 26354, - "staunch": 26355, - "##onga": 26356, - "astronomers": 26357, - "sera": 26358, - "sofie": 26359, - "emergencies": 26360, - "susquehanna": 26361, - "##heard": 26362, - "duc": 26363, - "mastery": 26364, - "vh1": 26365, - "williamsburg": 26366, - "bayer": 26367, - "buckled": 26368, - "craving": 26369, - "##khan": 26370, - "##rdes": 26371, - "bloomington": 26372, - "##write": 26373, - "alton": 26374, - "barbecue": 26375, - "##bians": 26376, - "justine": 26377, - "##hri": 26378, - "##ndt": 26379, - "delightful": 26380, - "smartphone": 26381, - "newtown": 26382, - "photon": 26383, - "retrieval": 26384, - "peugeot": 26385, - "hissing": 26386, - "##monium": 26387, - "##orough": 26388, - "flavors": 26389, - "lighted": 26390, - "relaunched": 26391, - "tainted": 26392, - "##games": 26393, - "##lysis": 26394, - "anarchy": 26395, - "microscopic": 26396, - "hopping": 26397, - "adept": 26398, - "evade": 26399, - "evie": 26400, - "##beau": 26401, - "inhibit": 26402, - "sinn": 26403, - "adjustable": 26404, - "hurst": 26405, - "intuition": 26406, - "wilton": 26407, - "cisco": 26408, - "44th": 26409, - "lawful": 26410, - "lowlands": 26411, - "stockings": 26412, - "thierry": 26413, - "##dalen": 26414, - "##hila": 26415, - "##nai": 26416, - "fates": 26417, - "prank": 26418, - "tb": 26419, - "maison": 26420, - "lobbied": 26421, - "provocative": 26422, - "1724": 26423, - "4a": 26424, - "utopia": 26425, - "##qual": 26426, - "carbonate": 26427, - "gujarati": 26428, - "purcell": 26429, - "##rford": 26430, - "curtiss": 26431, - "##mei": 26432, - "overgrown": 26433, - "arenas": 26434, - "mediation": 26435, - "swallows": 26436, - "##rnik": 26437, - "respectful": 26438, - "turnbull": 26439, - "##hedron": 26440, - "##hope": 26441, - "alyssa": 26442, - "ozone": 26443, - "##ʻi": 26444, - "ami": 26445, - "gestapo": 26446, - "johansson": 26447, - "snooker": 26448, - "canteen": 26449, - "cuff": 26450, - "declines": 26451, - "empathy": 26452, - "stigma": 26453, - "##ags": 26454, - "##iner": 26455, - "##raine": 26456, - "taxpayers": 26457, - "gui": 26458, - "volga": 26459, - "##wright": 26460, - "##copic": 26461, - "lifespan": 26462, - "overcame": 26463, - "tattooed": 26464, - "enactment": 26465, - "giggles": 26466, - "##ador": 26467, - "##camp": 26468, - "barrington": 26469, - "bribe": 26470, - "obligatory": 26471, - "orbiting": 26472, - "peng": 26473, - "##enas": 26474, - "elusive": 26475, - "sucker": 26476, - "##vating": 26477, - "cong": 26478, - "hardship": 26479, - "empowered": 26480, - "anticipating": 26481, - "estrada": 26482, - "cryptic": 26483, - "greasy": 26484, - "detainees": 26485, - "planck": 26486, - "sudbury": 26487, - "plaid": 26488, - "dod": 26489, - "marriott": 26490, - "kayla": 26491, - "##ears": 26492, - "##vb": 26493, - "##zd": 26494, - "mortally": 26495, - "##hein": 26496, - "cognition": 26497, - 
"radha": 26498, - "319": 26499, - "liechtenstein": 26500, - "meade": 26501, - "richly": 26502, - "argyle": 26503, - "harpsichord": 26504, - "liberalism": 26505, - "trumpets": 26506, - "lauded": 26507, - "tyrant": 26508, - "salsa": 26509, - "tiled": 26510, - "lear": 26511, - "promoters": 26512, - "reused": 26513, - "slicing": 26514, - "trident": 26515, - "##chuk": 26516, - "##gami": 26517, - "##lka": 26518, - "cantor": 26519, - "checkpoint": 26520, - "##points": 26521, - "gaul": 26522, - "leger": 26523, - "mammalian": 26524, - "##tov": 26525, - "##aar": 26526, - "##schaft": 26527, - "doha": 26528, - "frenchman": 26529, - "nirvana": 26530, - "##vino": 26531, - "delgado": 26532, - "headlining": 26533, - "##eron": 26534, - "##iography": 26535, - "jug": 26536, - "tko": 26537, - "1649": 26538, - "naga": 26539, - "intersections": 26540, - "##jia": 26541, - "benfica": 26542, - "nawab": 26543, - "##suka": 26544, - "ashford": 26545, - "gulp": 26546, - "##deck": 26547, - "##vill": 26548, - "##rug": 26549, - "brentford": 26550, - "frazier": 26551, - "pleasures": 26552, - "dunne": 26553, - "potsdam": 26554, - "shenzhen": 26555, - "dentistry": 26556, - "##tec": 26557, - "flanagan": 26558, - "##dorff": 26559, - "##hear": 26560, - "chorale": 26561, - "dinah": 26562, - "prem": 26563, - "quezon": 26564, - "##rogated": 26565, - "relinquished": 26566, - "sutra": 26567, - "terri": 26568, - "##pani": 26569, - "flaps": 26570, - "##rissa": 26571, - "poly": 26572, - "##rnet": 26573, - "homme": 26574, - "aback": 26575, - "##eki": 26576, - "linger": 26577, - "womb": 26578, - "##kson": 26579, - "##lewood": 26580, - "doorstep": 26581, - "orthodoxy": 26582, - "threaded": 26583, - "westfield": 26584, - "##rval": 26585, - "dioceses": 26586, - "fridays": 26587, - "subsided": 26588, - "##gata": 26589, - "loyalists": 26590, - "##biotic": 26591, - "##ettes": 26592, - "letterman": 26593, - "lunatic": 26594, - "prelate": 26595, - "tenderly": 26596, - "invariably": 26597, - "souza": 26598, - "thug": 26599, - "winslow": 26600, - "##otide": 26601, - "furlongs": 26602, - "gogh": 26603, - "jeopardy": 26604, - "##runa": 26605, - "pegasus": 26606, - "##umble": 26607, - "humiliated": 26608, - "standalone": 26609, - "tagged": 26610, - "##roller": 26611, - "freshmen": 26612, - "klan": 26613, - "##bright": 26614, - "attaining": 26615, - "initiating": 26616, - "transatlantic": 26617, - "logged": 26618, - "viz": 26619, - "##uance": 26620, - "1723": 26621, - "combatants": 26622, - "intervening": 26623, - "stephane": 26624, - "chieftain": 26625, - "despised": 26626, - "grazed": 26627, - "317": 26628, - "cdc": 26629, - "galveston": 26630, - "godzilla": 26631, - "macro": 26632, - "simulate": 26633, - "##planes": 26634, - "parades": 26635, - "##esses": 26636, - "960": 26637, - "##ductive": 26638, - "##unes": 26639, - "equator": 26640, - "overdose": 26641, - "##cans": 26642, - "##hosh": 26643, - "##lifting": 26644, - "joshi": 26645, - "epstein": 26646, - "sonora": 26647, - "treacherous": 26648, - "aquatics": 26649, - "manchu": 26650, - "responsive": 26651, - "##sation": 26652, - "supervisory": 26653, - "##christ": 26654, - "##llins": 26655, - "##ibar": 26656, - "##balance": 26657, - "##uso": 26658, - "kimball": 26659, - "karlsruhe": 26660, - "mab": 26661, - "##emy": 26662, - "ignores": 26663, - "phonetic": 26664, - "reuters": 26665, - "spaghetti": 26666, - "820": 26667, - "almighty": 26668, - "danzig": 26669, - "rumbling": 26670, - "tombstone": 26671, - "designations": 26672, - "lured": 26673, - "outset": 26674, - "##felt": 26675, - 
"supermarkets": 26676, - "##wt": 26677, - "grupo": 26678, - "kei": 26679, - "kraft": 26680, - "susanna": 26681, - "##blood": 26682, - "comprehension": 26683, - "genealogy": 26684, - "##aghan": 26685, - "##verted": 26686, - "redding": 26687, - "##ythe": 26688, - "1722": 26689, - "bowing": 26690, - "##pore": 26691, - "##roi": 26692, - "lest": 26693, - "sharpened": 26694, - "fulbright": 26695, - "valkyrie": 26696, - "sikhs": 26697, - "##unds": 26698, - "swans": 26699, - "bouquet": 26700, - "merritt": 26701, - "##tage": 26702, - "##venting": 26703, - "commuted": 26704, - "redhead": 26705, - "clerks": 26706, - "leasing": 26707, - "cesare": 26708, - "dea": 26709, - "hazy": 26710, - "##vances": 26711, - "fledged": 26712, - "greenfield": 26713, - "servicemen": 26714, - "##gical": 26715, - "armando": 26716, - "blackout": 26717, - "dt": 26718, - "sagged": 26719, - "downloadable": 26720, - "intra": 26721, - "potion": 26722, - "pods": 26723, - "##4th": 26724, - "##mism": 26725, - "xp": 26726, - "attendants": 26727, - "gambia": 26728, - "stale": 26729, - "##ntine": 26730, - "plump": 26731, - "asteroids": 26732, - "rediscovered": 26733, - "buds": 26734, - "flea": 26735, - "hive": 26736, - "##neas": 26737, - "1737": 26738, - "classifications": 26739, - "debuts": 26740, - "##eles": 26741, - "olympus": 26742, - "scala": 26743, - "##eurs": 26744, - "##gno": 26745, - "##mute": 26746, - "hummed": 26747, - "sigismund": 26748, - "visuals": 26749, - "wiggled": 26750, - "await": 26751, - "pilasters": 26752, - "clench": 26753, - "sulfate": 26754, - "##ances": 26755, - "bellevue": 26756, - "enigma": 26757, - "trainee": 26758, - "snort": 26759, - "##sw": 26760, - "clouded": 26761, - "denim": 26762, - "##rank": 26763, - "##rder": 26764, - "churning": 26765, - "hartman": 26766, - "lodges": 26767, - "riches": 26768, - "sima": 26769, - "##missible": 26770, - "accountable": 26771, - "socrates": 26772, - "regulates": 26773, - "mueller": 26774, - "##cr": 26775, - "1702": 26776, - "avoids": 26777, - "solids": 26778, - "himalayas": 26779, - "nutrient": 26780, - "pup": 26781, - "##jevic": 26782, - "squat": 26783, - "fades": 26784, - "nec": 26785, - "##lates": 26786, - "##pina": 26787, - "##rona": 26788, - "##ου": 26789, - "privateer": 26790, - "tequila": 26791, - "##gative": 26792, - "##mpton": 26793, - "apt": 26794, - "hornet": 26795, - "immortals": 26796, - "##dou": 26797, - "asturias": 26798, - "cleansing": 26799, - "dario": 26800, - "##rries": 26801, - "##anta": 26802, - "etymology": 26803, - "servicing": 26804, - "zhejiang": 26805, - "##venor": 26806, - "##nx": 26807, - "horned": 26808, - "erasmus": 26809, - "rayon": 26810, - "relocating": 26811, - "£10": 26812, - "##bags": 26813, - "escalated": 26814, - "promenade": 26815, - "stubble": 26816, - "2010s": 26817, - "artisans": 26818, - "axial": 26819, - "liquids": 26820, - "mora": 26821, - "sho": 26822, - "yoo": 26823, - "##tsky": 26824, - "bundles": 26825, - "oldies": 26826, - "##nally": 26827, - "notification": 26828, - "bastion": 26829, - "##ths": 26830, - "sparkle": 26831, - "##lved": 26832, - "1728": 26833, - "leash": 26834, - "pathogen": 26835, - "highs": 26836, - "##hmi": 26837, - "immature": 26838, - "880": 26839, - "gonzaga": 26840, - "ignatius": 26841, - "mansions": 26842, - "monterrey": 26843, - "sweets": 26844, - "bryson": 26845, - "##loe": 26846, - "polled": 26847, - "regatta": 26848, - "brightest": 26849, - "pei": 26850, - "rosy": 26851, - "squid": 26852, - "hatfield": 26853, - "payroll": 26854, - "addict": 26855, - "meath": 26856, - "cornerback": 26857, - 
"heaviest": 26858, - "lodging": 26859, - "##mage": 26860, - "capcom": 26861, - "rippled": 26862, - "##sily": 26863, - "barnet": 26864, - "mayhem": 26865, - "ymca": 26866, - "snuggled": 26867, - "rousseau": 26868, - "##cute": 26869, - "blanchard": 26870, - "284": 26871, - "fragmented": 26872, - "leighton": 26873, - "chromosomes": 26874, - "risking": 26875, - "##md": 26876, - "##strel": 26877, - "##utter": 26878, - "corinne": 26879, - "coyotes": 26880, - "cynical": 26881, - "hiroshi": 26882, - "yeomanry": 26883, - "##ractive": 26884, - "ebook": 26885, - "grading": 26886, - "mandela": 26887, - "plume": 26888, - "agustin": 26889, - "magdalene": 26890, - "##rkin": 26891, - "bea": 26892, - "femme": 26893, - "trafford": 26894, - "##coll": 26895, - "##lun": 26896, - "##tance": 26897, - "52nd": 26898, - "fourier": 26899, - "upton": 26900, - "##mental": 26901, - "camilla": 26902, - "gust": 26903, - "iihf": 26904, - "islamabad": 26905, - "longevity": 26906, - "##kala": 26907, - "feldman": 26908, - "netting": 26909, - "##rization": 26910, - "endeavour": 26911, - "foraging": 26912, - "mfa": 26913, - "orr": 26914, - "##open": 26915, - "greyish": 26916, - "contradiction": 26917, - "graz": 26918, - "##ruff": 26919, - "handicapped": 26920, - "marlene": 26921, - "tweed": 26922, - "oaxaca": 26923, - "spp": 26924, - "campos": 26925, - "miocene": 26926, - "pri": 26927, - "configured": 26928, - "cooks": 26929, - "pluto": 26930, - "cozy": 26931, - "pornographic": 26932, - "##entes": 26933, - "70th": 26934, - "fairness": 26935, - "glided": 26936, - "jonny": 26937, - "lynne": 26938, - "rounding": 26939, - "sired": 26940, - "##emon": 26941, - "##nist": 26942, - "remade": 26943, - "uncover": 26944, - "##mack": 26945, - "complied": 26946, - "lei": 26947, - "newsweek": 26948, - "##jured": 26949, - "##parts": 26950, - "##enting": 26951, - "##pg": 26952, - "293": 26953, - "finer": 26954, - "guerrillas": 26955, - "athenian": 26956, - "deng": 26957, - "disused": 26958, - "stepmother": 26959, - "accuse": 26960, - "gingerly": 26961, - "seduction": 26962, - "521": 26963, - "confronting": 26964, - "##walker": 26965, - "##going": 26966, - "gora": 26967, - "nostalgia": 26968, - "sabres": 26969, - "virginity": 26970, - "wrenched": 26971, - "##minated": 26972, - "syndication": 26973, - "wielding": 26974, - "eyre": 26975, - "##56": 26976, - "##gnon": 26977, - "##igny": 26978, - "behaved": 26979, - "taxpayer": 26980, - "sweeps": 26981, - "##growth": 26982, - "childless": 26983, - "gallant": 26984, - "##ywood": 26985, - "amplified": 26986, - "geraldine": 26987, - "scrape": 26988, - "##ffi": 26989, - "babylonian": 26990, - "fresco": 26991, - "##rdan": 26992, - "##kney": 26993, - "##position": 26994, - "1718": 26995, - "restricting": 26996, - "tack": 26997, - "fukuoka": 26998, - "osborn": 26999, - "selector": 27000, - "partnering": 27001, - "##dlow": 27002, - "318": 27003, - "gnu": 27004, - "kia": 27005, - "tak": 27006, - "whitley": 27007, - "gables": 27008, - "##54": 27009, - "##mania": 27010, - "mri": 27011, - "softness": 27012, - "immersion": 27013, - "##bots": 27014, - "##evsky": 27015, - "1713": 27016, - "chilling": 27017, - "insignificant": 27018, - "pcs": 27019, - "##uis": 27020, - "elites": 27021, - "lina": 27022, - "purported": 27023, - "supplemental": 27024, - "teaming": 27025, - "##americana": 27026, - "##dding": 27027, - "##inton": 27028, - "proficient": 27029, - "rouen": 27030, - "##nage": 27031, - "##rret": 27032, - "niccolo": 27033, - "selects": 27034, - "##bread": 27035, - "fluffy": 27036, - "1621": 27037, - "gruff": 
27038, - "knotted": 27039, - "mukherjee": 27040, - "polgara": 27041, - "thrash": 27042, - "nicholls": 27043, - "secluded": 27044, - "smoothing": 27045, - "thru": 27046, - "corsica": 27047, - "loaf": 27048, - "whitaker": 27049, - "inquiries": 27050, - "##rrier": 27051, - "##kam": 27052, - "indochina": 27053, - "289": 27054, - "marlins": 27055, - "myles": 27056, - "peking": 27057, - "##tea": 27058, - "extracts": 27059, - "pastry": 27060, - "superhuman": 27061, - "connacht": 27062, - "vogel": 27063, - "##ditional": 27064, - "##het": 27065, - "##udged": 27066, - "##lash": 27067, - "gloss": 27068, - "quarries": 27069, - "refit": 27070, - "teaser": 27071, - "##alic": 27072, - "##gaon": 27073, - "20s": 27074, - "materialized": 27075, - "sling": 27076, - "camped": 27077, - "pickering": 27078, - "tung": 27079, - "tracker": 27080, - "pursuant": 27081, - "##cide": 27082, - "cranes": 27083, - "soc": 27084, - "##cini": 27085, - "##typical": 27086, - "##viere": 27087, - "anhalt": 27088, - "overboard": 27089, - "workout": 27090, - "chores": 27091, - "fares": 27092, - "orphaned": 27093, - "stains": 27094, - "##logie": 27095, - "fenton": 27096, - "surpassing": 27097, - "joyah": 27098, - "triggers": 27099, - "##itte": 27100, - "grandmaster": 27101, - "##lass": 27102, - "##lists": 27103, - "clapping": 27104, - "fraudulent": 27105, - "ledger": 27106, - "nagasaki": 27107, - "##cor": 27108, - "##nosis": 27109, - "##tsa": 27110, - "eucalyptus": 27111, - "tun": 27112, - "##icio": 27113, - "##rney": 27114, - "##tara": 27115, - "dax": 27116, - "heroism": 27117, - "ina": 27118, - "wrexham": 27119, - "onboard": 27120, - "unsigned": 27121, - "##dates": 27122, - "moshe": 27123, - "galley": 27124, - "winnie": 27125, - "droplets": 27126, - "exiles": 27127, - "praises": 27128, - "watered": 27129, - "noodles": 27130, - "##aia": 27131, - "fein": 27132, - "adi": 27133, - "leland": 27134, - "multicultural": 27135, - "stink": 27136, - "bingo": 27137, - "comets": 27138, - "erskine": 27139, - "modernized": 27140, - "canned": 27141, - "constraint": 27142, - "domestically": 27143, - "chemotherapy": 27144, - "featherweight": 27145, - "stifled": 27146, - "##mum": 27147, - "darkly": 27148, - "irresistible": 27149, - "refreshing": 27150, - "hasty": 27151, - "isolate": 27152, - "##oys": 27153, - "kitchener": 27154, - "planners": 27155, - "##wehr": 27156, - "cages": 27157, - "yarn": 27158, - "implant": 27159, - "toulon": 27160, - "elects": 27161, - "childbirth": 27162, - "yue": 27163, - "##lind": 27164, - "##lone": 27165, - "cn": 27166, - "rightful": 27167, - "sportsman": 27168, - "junctions": 27169, - "remodeled": 27170, - "specifies": 27171, - "##rgh": 27172, - "291": 27173, - "##oons": 27174, - "complimented": 27175, - "##urgent": 27176, - "lister": 27177, - "ot": 27178, - "##logic": 27179, - "bequeathed": 27180, - "cheekbones": 27181, - "fontana": 27182, - "gabby": 27183, - "##dial": 27184, - "amadeus": 27185, - "corrugated": 27186, - "maverick": 27187, - "resented": 27188, - "triangles": 27189, - "##hered": 27190, - "##usly": 27191, - "nazareth": 27192, - "tyrol": 27193, - "1675": 27194, - "assent": 27195, - "poorer": 27196, - "sectional": 27197, - "aegean": 27198, - "##cous": 27199, - "296": 27200, - "nylon": 27201, - "ghanaian": 27202, - "##egorical": 27203, - "##weig": 27204, - "cushions": 27205, - "forbid": 27206, - "fusiliers": 27207, - "obstruction": 27208, - "somerville": 27209, - "##scia": 27210, - "dime": 27211, - "earrings": 27212, - "elliptical": 27213, - "leyte": 27214, - "oder": 27215, - "polymers": 27216, - "timmy": 
27217, - "atm": 27218, - "midtown": 27219, - "piloted": 27220, - "settles": 27221, - "continual": 27222, - "externally": 27223, - "mayfield": 27224, - "##uh": 27225, - "enrichment": 27226, - "henson": 27227, - "keane": 27228, - "persians": 27229, - "1733": 27230, - "benji": 27231, - "braden": 27232, - "pep": 27233, - "324": 27234, - "##efe": 27235, - "contenders": 27236, - "pepsi": 27237, - "valet": 27238, - "##isches": 27239, - "298": 27240, - "##asse": 27241, - "##earing": 27242, - "goofy": 27243, - "stroll": 27244, - "##amen": 27245, - "authoritarian": 27246, - "occurrences": 27247, - "adversary": 27248, - "ahmedabad": 27249, - "tangent": 27250, - "toppled": 27251, - "dorchester": 27252, - "1672": 27253, - "modernism": 27254, - "marxism": 27255, - "islamist": 27256, - "charlemagne": 27257, - "exponential": 27258, - "racks": 27259, - "unicode": 27260, - "brunette": 27261, - "mbc": 27262, - "pic": 27263, - "skirmish": 27264, - "##bund": 27265, - "##lad": 27266, - "##powered": 27267, - "##yst": 27268, - "hoisted": 27269, - "messina": 27270, - "shatter": 27271, - "##ctum": 27272, - "jedi": 27273, - "vantage": 27274, - "##music": 27275, - "##neil": 27276, - "clemens": 27277, - "mahmoud": 27278, - "corrupted": 27279, - "authentication": 27280, - "lowry": 27281, - "nils": 27282, - "##washed": 27283, - "omnibus": 27284, - "wounding": 27285, - "jillian": 27286, - "##itors": 27287, - "##opped": 27288, - "serialized": 27289, - "narcotics": 27290, - "handheld": 27291, - "##arm": 27292, - "##plicity": 27293, - "intersecting": 27294, - "stimulating": 27295, - "##onis": 27296, - "crate": 27297, - "fellowships": 27298, - "hemingway": 27299, - "casinos": 27300, - "climatic": 27301, - "fordham": 27302, - "copeland": 27303, - "drip": 27304, - "beatty": 27305, - "leaflets": 27306, - "robber": 27307, - "brothel": 27308, - "madeira": 27309, - "##hedral": 27310, - "sphinx": 27311, - "ultrasound": 27312, - "##vana": 27313, - "valor": 27314, - "forbade": 27315, - "leonid": 27316, - "villas": 27317, - "##aldo": 27318, - "duane": 27319, - "marquez": 27320, - "##cytes": 27321, - "disadvantaged": 27322, - "forearms": 27323, - "kawasaki": 27324, - "reacts": 27325, - "consular": 27326, - "lax": 27327, - "uncles": 27328, - "uphold": 27329, - "##hopper": 27330, - "concepcion": 27331, - "dorsey": 27332, - "lass": 27333, - "##izan": 27334, - "arching": 27335, - "passageway": 27336, - "1708": 27337, - "researches": 27338, - "tia": 27339, - "internationals": 27340, - "##graphs": 27341, - "##opers": 27342, - "distinguishes": 27343, - "javanese": 27344, - "divert": 27345, - "##uven": 27346, - "plotted": 27347, - "##listic": 27348, - "##rwin": 27349, - "##erik": 27350, - "##tify": 27351, - "affirmative": 27352, - "signifies": 27353, - "validation": 27354, - "##bson": 27355, - "kari": 27356, - "felicity": 27357, - "georgina": 27358, - "zulu": 27359, - "##eros": 27360, - "##rained": 27361, - "##rath": 27362, - "overcoming": 27363, - "##dot": 27364, - "argyll": 27365, - "##rbin": 27366, - "1734": 27367, - "chiba": 27368, - "ratification": 27369, - "windy": 27370, - "earls": 27371, - "parapet": 27372, - "##marks": 27373, - "hunan": 27374, - "pristine": 27375, - "astrid": 27376, - "punta": 27377, - "##gart": 27378, - "brodie": 27379, - "##kota": 27380, - "##oder": 27381, - "malaga": 27382, - "minerva": 27383, - "rouse": 27384, - "##phonic": 27385, - "bellowed": 27386, - "pagoda": 27387, - "portals": 27388, - "reclamation": 27389, - "##gur": 27390, - "##odies": 27391, - "##⁄₄": 27392, - "parentheses": 27393, - "quoting": 27394, - 
"allergic": 27395, - "palette": 27396, - "showcases": 27397, - "benefactor": 27398, - "heartland": 27399, - "nonlinear": 27400, - "##tness": 27401, - "bladed": 27402, - "cheerfully": 27403, - "scans": 27404, - "##ety": 27405, - "##hone": 27406, - "1666": 27407, - "girlfriends": 27408, - "pedersen": 27409, - "hiram": 27410, - "sous": 27411, - "##liche": 27412, - "##nator": 27413, - "1683": 27414, - "##nery": 27415, - "##orio": 27416, - "##umen": 27417, - "bobo": 27418, - "primaries": 27419, - "smiley": 27420, - "##cb": 27421, - "unearthed": 27422, - "uniformly": 27423, - "fis": 27424, - "metadata": 27425, - "1635": 27426, - "ind": 27427, - "##oted": 27428, - "recoil": 27429, - "##titles": 27430, - "##tura": 27431, - "##ια": 27432, - "406": 27433, - "hilbert": 27434, - "jamestown": 27435, - "mcmillan": 27436, - "tulane": 27437, - "seychelles": 27438, - "##frid": 27439, - "antics": 27440, - "coli": 27441, - "fated": 27442, - "stucco": 27443, - "##grants": 27444, - "1654": 27445, - "bulky": 27446, - "accolades": 27447, - "arrays": 27448, - "caledonian": 27449, - "carnage": 27450, - "optimism": 27451, - "puebla": 27452, - "##tative": 27453, - "##cave": 27454, - "enforcing": 27455, - "rotherham": 27456, - "seo": 27457, - "dunlop": 27458, - "aeronautics": 27459, - "chimed": 27460, - "incline": 27461, - "zoning": 27462, - "archduke": 27463, - "hellenistic": 27464, - "##oses": 27465, - "##sions": 27466, - "candi": 27467, - "thong": 27468, - "##ople": 27469, - "magnate": 27470, - "rustic": 27471, - "##rsk": 27472, - "projective": 27473, - "slant": 27474, - "##offs": 27475, - "danes": 27476, - "hollis": 27477, - "vocalists": 27478, - "##ammed": 27479, - "congenital": 27480, - "contend": 27481, - "gesellschaft": 27482, - "##ocating": 27483, - "##pressive": 27484, - "douglass": 27485, - "quieter": 27486, - "##cm": 27487, - "##kshi": 27488, - "howled": 27489, - "salim": 27490, - "spontaneously": 27491, - "townsville": 27492, - "buena": 27493, - "southport": 27494, - "##bold": 27495, - "kato": 27496, - "1638": 27497, - "faerie": 27498, - "stiffly": 27499, - "##vus": 27500, - "##rled": 27501, - "297": 27502, - "flawless": 27503, - "realising": 27504, - "taboo": 27505, - "##7th": 27506, - "bytes": 27507, - "straightening": 27508, - "356": 27509, - "jena": 27510, - "##hid": 27511, - "##rmin": 27512, - "cartwright": 27513, - "berber": 27514, - "bertram": 27515, - "soloists": 27516, - "411": 27517, - "noses": 27518, - "417": 27519, - "coping": 27520, - "fission": 27521, - "hardin": 27522, - "inca": 27523, - "##cen": 27524, - "1717": 27525, - "mobilized": 27526, - "vhf": 27527, - "##raf": 27528, - "biscuits": 27529, - "curate": 27530, - "##85": 27531, - "##anial": 27532, - "331": 27533, - "gaunt": 27534, - "neighbourhoods": 27535, - "1540": 27536, - "##abas": 27537, - "blanca": 27538, - "bypassed": 27539, - "sockets": 27540, - "behold": 27541, - "coincidentally": 27542, - "##bane": 27543, - "nara": 27544, - "shave": 27545, - "splinter": 27546, - "terrific": 27547, - "##arion": 27548, - "##erian": 27549, - "commonplace": 27550, - "juris": 27551, - "redwood": 27552, - "waistband": 27553, - "boxed": 27554, - "caitlin": 27555, - "fingerprints": 27556, - "jennie": 27557, - "naturalized": 27558, - "##ired": 27559, - "balfour": 27560, - "craters": 27561, - "jody": 27562, - "bungalow": 27563, - "hugely": 27564, - "quilt": 27565, - "glitter": 27566, - "pigeons": 27567, - "undertaker": 27568, - "bulging": 27569, - "constrained": 27570, - "goo": 27571, - "##sil": 27572, - "##akh": 27573, - "assimilation": 27574, - 
"reworked": 27575, - "##person": 27576, - "persuasion": 27577, - "##pants": 27578, - "felicia": 27579, - "##cliff": 27580, - "##ulent": 27581, - "1732": 27582, - "explodes": 27583, - "##dun": 27584, - "##inium": 27585, - "##zic": 27586, - "lyman": 27587, - "vulture": 27588, - "hog": 27589, - "overlook": 27590, - "begs": 27591, - "northwards": 27592, - "ow": 27593, - "spoil": 27594, - "##urer": 27595, - "fatima": 27596, - "favorably": 27597, - "accumulate": 27598, - "sargent": 27599, - "sorority": 27600, - "corresponded": 27601, - "dispersal": 27602, - "kochi": 27603, - "toned": 27604, - "##imi": 27605, - "##lita": 27606, - "internacional": 27607, - "newfound": 27608, - "##agger": 27609, - "##lynn": 27610, - "##rigue": 27611, - "booths": 27612, - "peanuts": 27613, - "##eborg": 27614, - "medicare": 27615, - "muriel": 27616, - "nur": 27617, - "##uram": 27618, - "crates": 27619, - "millennia": 27620, - "pajamas": 27621, - "worsened": 27622, - "##breakers": 27623, - "jimi": 27624, - "vanuatu": 27625, - "yawned": 27626, - "##udeau": 27627, - "carousel": 27628, - "##hony": 27629, - "hurdle": 27630, - "##ccus": 27631, - "##mounted": 27632, - "##pod": 27633, - "rv": 27634, - "##eche": 27635, - "airship": 27636, - "ambiguity": 27637, - "compulsion": 27638, - "recapture": 27639, - "##claiming": 27640, - "arthritis": 27641, - "##osomal": 27642, - "1667": 27643, - "asserting": 27644, - "ngc": 27645, - "sniffing": 27646, - "dade": 27647, - "discontent": 27648, - "glendale": 27649, - "ported": 27650, - "##amina": 27651, - "defamation": 27652, - "rammed": 27653, - "##scent": 27654, - "fling": 27655, - "livingstone": 27656, - "##fleet": 27657, - "875": 27658, - "##ppy": 27659, - "apocalyptic": 27660, - "comrade": 27661, - "lcd": 27662, - "##lowe": 27663, - "cessna": 27664, - "eine": 27665, - "persecuted": 27666, - "subsistence": 27667, - "demi": 27668, - "hoop": 27669, - "reliefs": 27670, - "710": 27671, - "coptic": 27672, - "progressing": 27673, - "stemmed": 27674, - "perpetrators": 27675, - "1665": 27676, - "priestess": 27677, - "##nio": 27678, - "dobson": 27679, - "ebony": 27680, - "rooster": 27681, - "itf": 27682, - "tortricidae": 27683, - "##bbon": 27684, - "##jian": 27685, - "cleanup": 27686, - "##jean": 27687, - "##øy": 27688, - "1721": 27689, - "eighties": 27690, - "taxonomic": 27691, - "holiness": 27692, - "##hearted": 27693, - "##spar": 27694, - "antilles": 27695, - "showcasing": 27696, - "stabilized": 27697, - "##nb": 27698, - "gia": 27699, - "mascara": 27700, - "michelangelo": 27701, - "dawned": 27702, - "##uria": 27703, - "##vinsky": 27704, - "extinguished": 27705, - "fitz": 27706, - "grotesque": 27707, - "£100": 27708, - "##fera": 27709, - "##loid": 27710, - "##mous": 27711, - "barges": 27712, - "neue": 27713, - "throbbed": 27714, - "cipher": 27715, - "johnnie": 27716, - "##a1": 27717, - "##mpt": 27718, - "outburst": 27719, - "##swick": 27720, - "spearheaded": 27721, - "administrations": 27722, - "c1": 27723, - "heartbreak": 27724, - "pixels": 27725, - "pleasantly": 27726, - "##enay": 27727, - "lombardy": 27728, - "plush": 27729, - "##nsed": 27730, - "bobbie": 27731, - "##hly": 27732, - "reapers": 27733, - "tremor": 27734, - "xiang": 27735, - "minogue": 27736, - "substantive": 27737, - "hitch": 27738, - "barak": 27739, - "##wyl": 27740, - "kwan": 27741, - "##encia": 27742, - "910": 27743, - "obscene": 27744, - "elegance": 27745, - "indus": 27746, - "surfer": 27747, - "bribery": 27748, - "conserve": 27749, - "##hyllum": 27750, - "##masters": 27751, - "horatio": 27752, - "##fat": 27753, - 
"apes": 27754, - "rebound": 27755, - "psychotic": 27756, - "##pour": 27757, - "iteration": 27758, - "##mium": 27759, - "##vani": 27760, - "botanic": 27761, - "horribly": 27762, - "antiques": 27763, - "dispose": 27764, - "paxton": 27765, - "##hli": 27766, - "##wg": 27767, - "timeless": 27768, - "1704": 27769, - "disregard": 27770, - "engraver": 27771, - "hounds": 27772, - "##bau": 27773, - "##version": 27774, - "looted": 27775, - "uno": 27776, - "facilitates": 27777, - "groans": 27778, - "masjid": 27779, - "rutland": 27780, - "antibody": 27781, - "disqualification": 27782, - "decatur": 27783, - "footballers": 27784, - "quake": 27785, - "slacks": 27786, - "48th": 27787, - "rein": 27788, - "scribe": 27789, - "stabilize": 27790, - "commits": 27791, - "exemplary": 27792, - "tho": 27793, - "##hort": 27794, - "##chison": 27795, - "pantry": 27796, - "traversed": 27797, - "##hiti": 27798, - "disrepair": 27799, - "identifiable": 27800, - "vibrated": 27801, - "baccalaureate": 27802, - "##nnis": 27803, - "csa": 27804, - "interviewing": 27805, - "##iensis": 27806, - "##raße": 27807, - "greaves": 27808, - "wealthiest": 27809, - "343": 27810, - "classed": 27811, - "jogged": 27812, - "£5": 27813, - "##58": 27814, - "##atal": 27815, - "illuminating": 27816, - "knicks": 27817, - "respecting": 27818, - "##uno": 27819, - "scrubbed": 27820, - "##iji": 27821, - "##dles": 27822, - "kruger": 27823, - "moods": 27824, - "growls": 27825, - "raider": 27826, - "silvia": 27827, - "chefs": 27828, - "kam": 27829, - "vr": 27830, - "cree": 27831, - "percival": 27832, - "##terol": 27833, - "gunter": 27834, - "counterattack": 27835, - "defiant": 27836, - "henan": 27837, - "ze": 27838, - "##rasia": 27839, - "##riety": 27840, - "equivalence": 27841, - "submissions": 27842, - "##fra": 27843, - "##thor": 27844, - "bautista": 27845, - "mechanically": 27846, - "##heater": 27847, - "cornice": 27848, - "herbal": 27849, - "templar": 27850, - "##mering": 27851, - "outputs": 27852, - "ruining": 27853, - "ligand": 27854, - "renumbered": 27855, - "extravagant": 27856, - "mika": 27857, - "blockbuster": 27858, - "eta": 27859, - "insurrection": 27860, - "##ilia": 27861, - "darkening": 27862, - "ferocious": 27863, - "pianos": 27864, - "strife": 27865, - "kinship": 27866, - "##aer": 27867, - "melee": 27868, - "##anor": 27869, - "##iste": 27870, - "##may": 27871, - "##oue": 27872, - "decidedly": 27873, - "weep": 27874, - "##jad": 27875, - "##missive": 27876, - "##ppel": 27877, - "354": 27878, - "puget": 27879, - "unease": 27880, - "##gnant": 27881, - "1629": 27882, - "hammering": 27883, - "kassel": 27884, - "ob": 27885, - "wessex": 27886, - "##lga": 27887, - "bromwich": 27888, - "egan": 27889, - "paranoia": 27890, - "utilization": 27891, - "##atable": 27892, - "##idad": 27893, - "contradictory": 27894, - "provoke": 27895, - "##ols": 27896, - "##ouring": 27897, - "##tangled": 27898, - "knesset": 27899, - "##very": 27900, - "##lette": 27901, - "plumbing": 27902, - "##sden": 27903, - "##¹": 27904, - "greensboro": 27905, - "occult": 27906, - "sniff": 27907, - "338": 27908, - "zev": 27909, - "beaming": 27910, - "gamer": 27911, - "haggard": 27912, - "mahal": 27913, - "##olt": 27914, - "##pins": 27915, - "mendes": 27916, - "utmost": 27917, - "briefing": 27918, - "gunnery": 27919, - "##gut": 27920, - "##pher": 27921, - "##zh": 27922, - "##rok": 27923, - "1679": 27924, - "khalifa": 27925, - "sonya": 27926, - "##boot": 27927, - "principals": 27928, - "urbana": 27929, - "wiring": 27930, - "##liffe": 27931, - "##minating": 27932, - "##rrado": 27933, - 
"dahl": 27934, - "nyu": 27935, - "skepticism": 27936, - "np": 27937, - "townspeople": 27938, - "ithaca": 27939, - "lobster": 27940, - "somethin": 27941, - "##fur": 27942, - "##arina": 27943, - "##−1": 27944, - "freighter": 27945, - "zimmerman": 27946, - "biceps": 27947, - "contractual": 27948, - "##herton": 27949, - "amend": 27950, - "hurrying": 27951, - "subconscious": 27952, - "##anal": 27953, - "336": 27954, - "meng": 27955, - "clermont": 27956, - "spawning": 27957, - "##eia": 27958, - "##lub": 27959, - "dignitaries": 27960, - "impetus": 27961, - "snacks": 27962, - "spotting": 27963, - "twigs": 27964, - "##bilis": 27965, - "##cz": 27966, - "##ouk": 27967, - "libertadores": 27968, - "nic": 27969, - "skylar": 27970, - "##aina": 27971, - "##firm": 27972, - "gustave": 27973, - "asean": 27974, - "##anum": 27975, - "dieter": 27976, - "legislatures": 27977, - "flirt": 27978, - "bromley": 27979, - "trolls": 27980, - "umar": 27981, - "##bbies": 27982, - "##tyle": 27983, - "blah": 27984, - "parc": 27985, - "bridgeport": 27986, - "crank": 27987, - "negligence": 27988, - "##nction": 27989, - "46th": 27990, - "constantin": 27991, - "molded": 27992, - "bandages": 27993, - "seriousness": 27994, - "00pm": 27995, - "siegel": 27996, - "carpets": 27997, - "compartments": 27998, - "upbeat": 27999, - "statehood": 28000, - "##dner": 28001, - "##edging": 28002, - "marko": 28003, - "730": 28004, - "platt": 28005, - "##hane": 28006, - "paving": 28007, - "##iy": 28008, - "1738": 28009, - "abbess": 28010, - "impatience": 28011, - "limousine": 28012, - "nbl": 28013, - "##talk": 28014, - "441": 28015, - "lucille": 28016, - "mojo": 28017, - "nightfall": 28018, - "robbers": 28019, - "##nais": 28020, - "karel": 28021, - "brisk": 28022, - "calves": 28023, - "replicate": 28024, - "ascribed": 28025, - "telescopes": 28026, - "##olf": 28027, - "intimidated": 28028, - "##reen": 28029, - "ballast": 28030, - "specialization": 28031, - "##sit": 28032, - "aerodynamic": 28033, - "caliphate": 28034, - "rainer": 28035, - "visionary": 28036, - "##arded": 28037, - "epsilon": 28038, - "##aday": 28039, - "##onte": 28040, - "aggregation": 28041, - "auditory": 28042, - "boosted": 28043, - "reunification": 28044, - "kathmandu": 28045, - "loco": 28046, - "robyn": 28047, - "402": 28048, - "acknowledges": 28049, - "appointing": 28050, - "humanoid": 28051, - "newell": 28052, - "redeveloped": 28053, - "restraints": 28054, - "##tained": 28055, - "barbarians": 28056, - "chopper": 28057, - "1609": 28058, - "italiana": 28059, - "##lez": 28060, - "##lho": 28061, - "investigates": 28062, - "wrestlemania": 28063, - "##anies": 28064, - "##bib": 28065, - "690": 28066, - "##falls": 28067, - "creaked": 28068, - "dragoons": 28069, - "gravely": 28070, - "minions": 28071, - "stupidity": 28072, - "volley": 28073, - "##harat": 28074, - "##week": 28075, - "musik": 28076, - "##eries": 28077, - "##uously": 28078, - "fungal": 28079, - "massimo": 28080, - "semantics": 28081, - "malvern": 28082, - "##ahl": 28083, - "##pee": 28084, - "discourage": 28085, - "embryo": 28086, - "imperialism": 28087, - "1910s": 28088, - "profoundly": 28089, - "##ddled": 28090, - "jiangsu": 28091, - "sparkled": 28092, - "stat": 28093, - "##holz": 28094, - "sweatshirt": 28095, - "tobin": 28096, - "##iction": 28097, - "sneered": 28098, - "##cheon": 28099, - "##oit": 28100, - "brit": 28101, - "causal": 28102, - "smyth": 28103, - "##neuve": 28104, - "diffuse": 28105, - "perrin": 28106, - "silvio": 28107, - "##ipes": 28108, - "##recht": 28109, - "detonated": 28110, - "iqbal": 28111, - 
"selma": 28112, - "##nism": 28113, - "##zumi": 28114, - "roasted": 28115, - "##riders": 28116, - "tay": 28117, - "##ados": 28118, - "##mament": 28119, - "##mut": 28120, - "##rud": 28121, - "840": 28122, - "completes": 28123, - "nipples": 28124, - "cfa": 28125, - "flavour": 28126, - "hirsch": 28127, - "##laus": 28128, - "calderon": 28129, - "sneakers": 28130, - "moravian": 28131, - "##ksha": 28132, - "1622": 28133, - "rq": 28134, - "294": 28135, - "##imeters": 28136, - "bodo": 28137, - "##isance": 28138, - "##pre": 28139, - "##ronia": 28140, - "anatomical": 28141, - "excerpt": 28142, - "##lke": 28143, - "dh": 28144, - "kunst": 28145, - "##tablished": 28146, - "##scoe": 28147, - "biomass": 28148, - "panted": 28149, - "unharmed": 28150, - "gael": 28151, - "housemates": 28152, - "montpellier": 28153, - "##59": 28154, - "coa": 28155, - "rodents": 28156, - "tonic": 28157, - "hickory": 28158, - "singleton": 28159, - "##taro": 28160, - "451": 28161, - "1719": 28162, - "aldo": 28163, - "breaststroke": 28164, - "dempsey": 28165, - "och": 28166, - "rocco": 28167, - "##cuit": 28168, - "merton": 28169, - "dissemination": 28170, - "midsummer": 28171, - "serials": 28172, - "##idi": 28173, - "haji": 28174, - "polynomials": 28175, - "##rdon": 28176, - "gs": 28177, - "enoch": 28178, - "prematurely": 28179, - "shutter": 28180, - "taunton": 28181, - "£3": 28182, - "##grating": 28183, - "##inates": 28184, - "archangel": 28185, - "harassed": 28186, - "##asco": 28187, - "326": 28188, - "archway": 28189, - "dazzling": 28190, - "##ecin": 28191, - "1736": 28192, - "sumo": 28193, - "wat": 28194, - "##kovich": 28195, - "1086": 28196, - "honneur": 28197, - "##ently": 28198, - "##nostic": 28199, - "##ttal": 28200, - "##idon": 28201, - "1605": 28202, - "403": 28203, - "1716": 28204, - "blogger": 28205, - "rents": 28206, - "##gnan": 28207, - "hires": 28208, - "##ikh": 28209, - "##dant": 28210, - "howie": 28211, - "##rons": 28212, - "handler": 28213, - "retracted": 28214, - "shocks": 28215, - "1632": 28216, - "arun": 28217, - "duluth": 28218, - "kepler": 28219, - "trumpeter": 28220, - "##lary": 28221, - "peeking": 28222, - "seasoned": 28223, - "trooper": 28224, - "##mara": 28225, - "laszlo": 28226, - "##iciencies": 28227, - "##rti": 28228, - "heterosexual": 28229, - "##inatory": 28230, - "##ssion": 28231, - "indira": 28232, - "jogging": 28233, - "##inga": 28234, - "##lism": 28235, - "beit": 28236, - "dissatisfaction": 28237, - "malice": 28238, - "##ately": 28239, - "nedra": 28240, - "peeling": 28241, - "##rgeon": 28242, - "47th": 28243, - "stadiums": 28244, - "475": 28245, - "vertigo": 28246, - "##ains": 28247, - "iced": 28248, - "restroom": 28249, - "##plify": 28250, - "##tub": 28251, - "illustrating": 28252, - "pear": 28253, - "##chner": 28254, - "##sibility": 28255, - "inorganic": 28256, - "rappers": 28257, - "receipts": 28258, - "watery": 28259, - "##kura": 28260, - "lucinda": 28261, - "##oulos": 28262, - "reintroduced": 28263, - "##8th": 28264, - "##tched": 28265, - "gracefully": 28266, - "saxons": 28267, - "nutritional": 28268, - "wastewater": 28269, - "rained": 28270, - "favourites": 28271, - "bedrock": 28272, - "fisted": 28273, - "hallways": 28274, - "likeness": 28275, - "upscale": 28276, - "##lateral": 28277, - "1580": 28278, - "blinds": 28279, - "prequel": 28280, - "##pps": 28281, - "##tama": 28282, - "deter": 28283, - "humiliating": 28284, - "restraining": 28285, - "tn": 28286, - "vents": 28287, - "1659": 28288, - "laundering": 28289, - "recess": 28290, - "rosary": 28291, - "tractors": 28292, - "coulter": 
28293, - "federer": 28294, - "##ifiers": 28295, - "##plin": 28296, - "persistence": 28297, - "##quitable": 28298, - "geschichte": 28299, - "pendulum": 28300, - "quakers": 28301, - "##beam": 28302, - "bassett": 28303, - "pictorial": 28304, - "buffet": 28305, - "koln": 28306, - "##sitor": 28307, - "drills": 28308, - "reciprocal": 28309, - "shooters": 28310, - "##57": 28311, - "##cton": 28312, - "##tees": 28313, - "converge": 28314, - "pip": 28315, - "dmitri": 28316, - "donnelly": 28317, - "yamamoto": 28318, - "aqua": 28319, - "azores": 28320, - "demographics": 28321, - "hypnotic": 28322, - "spitfire": 28323, - "suspend": 28324, - "wryly": 28325, - "roderick": 28326, - "##rran": 28327, - "sebastien": 28328, - "##asurable": 28329, - "mavericks": 28330, - "##fles": 28331, - "##200": 28332, - "himalayan": 28333, - "prodigy": 28334, - "##iance": 28335, - "transvaal": 28336, - "demonstrators": 28337, - "handcuffs": 28338, - "dodged": 28339, - "mcnamara": 28340, - "sublime": 28341, - "1726": 28342, - "crazed": 28343, - "##efined": 28344, - "##till": 28345, - "ivo": 28346, - "pondered": 28347, - "reconciled": 28348, - "shrill": 28349, - "sava": 28350, - "##duk": 28351, - "bal": 28352, - "cad": 28353, - "heresy": 28354, - "jaipur": 28355, - "goran": 28356, - "##nished": 28357, - "341": 28358, - "lux": 28359, - "shelly": 28360, - "whitehall": 28361, - "##hre": 28362, - "israelis": 28363, - "peacekeeping": 28364, - "##wled": 28365, - "1703": 28366, - "demetrius": 28367, - "ousted": 28368, - "##arians": 28369, - "##zos": 28370, - "beale": 28371, - "anwar": 28372, - "backstroke": 28373, - "raged": 28374, - "shrinking": 28375, - "cremated": 28376, - "##yck": 28377, - "benign": 28378, - "towing": 28379, - "wadi": 28380, - "darmstadt": 28381, - "landfill": 28382, - "parana": 28383, - "soothe": 28384, - "colleen": 28385, - "sidewalks": 28386, - "mayfair": 28387, - "tumble": 28388, - "hepatitis": 28389, - "ferrer": 28390, - "superstructure": 28391, - "##gingly": 28392, - "##urse": 28393, - "##wee": 28394, - "anthropological": 28395, - "translators": 28396, - "##mies": 28397, - "closeness": 28398, - "hooves": 28399, - "##pw": 28400, - "mondays": 28401, - "##roll": 28402, - "##vita": 28403, - "landscaping": 28404, - "##urized": 28405, - "purification": 28406, - "sock": 28407, - "thorns": 28408, - "thwarted": 28409, - "jalan": 28410, - "tiberius": 28411, - "##taka": 28412, - "saline": 28413, - "##rito": 28414, - "confidently": 28415, - "khyber": 28416, - "sculptors": 28417, - "##ij": 28418, - "brahms": 28419, - "hammersmith": 28420, - "inspectors": 28421, - "battista": 28422, - "fivb": 28423, - "fragmentation": 28424, - "hackney": 28425, - "##uls": 28426, - "arresting": 28427, - "exercising": 28428, - "antoinette": 28429, - "bedfordshire": 28430, - "##zily": 28431, - "dyed": 28432, - "##hema": 28433, - "1656": 28434, - "racetrack": 28435, - "variability": 28436, - "##tique": 28437, - "1655": 28438, - "austrians": 28439, - "deteriorating": 28440, - "madman": 28441, - "theorists": 28442, - "aix": 28443, - "lehman": 28444, - "weathered": 28445, - "1731": 28446, - "decreed": 28447, - "eruptions": 28448, - "1729": 28449, - "flaw": 28450, - "quinlan": 28451, - "sorbonne": 28452, - "flutes": 28453, - "nunez": 28454, - "1711": 28455, - "adored": 28456, - "downwards": 28457, - "fable": 28458, - "rasped": 28459, - "1712": 28460, - "moritz": 28461, - "mouthful": 28462, - "renegade": 28463, - "shivers": 28464, - "stunts": 28465, - "dysfunction": 28466, - "restrain": 28467, - "translit": 28468, - "327": 28469, - "pancakes": 
28470, - "##avio": 28471, - "##cision": 28472, - "##tray": 28473, - "351": 28474, - "vial": 28475, - "##lden": 28476, - "bain": 28477, - "##maid": 28478, - "##oxide": 28479, - "chihuahua": 28480, - "malacca": 28481, - "vimes": 28482, - "##rba": 28483, - "##rnier": 28484, - "1664": 28485, - "donnie": 28486, - "plaques": 28487, - "##ually": 28488, - "337": 28489, - "bangs": 28490, - "floppy": 28491, - "huntsville": 28492, - "loretta": 28493, - "nikolay": 28494, - "##otte": 28495, - "eater": 28496, - "handgun": 28497, - "ubiquitous": 28498, - "##hett": 28499, - "eras": 28500, - "zodiac": 28501, - "1634": 28502, - "##omorphic": 28503, - "1820s": 28504, - "##zog": 28505, - "cochran": 28506, - "##bula": 28507, - "##lithic": 28508, - "warring": 28509, - "##rada": 28510, - "dalai": 28511, - "excused": 28512, - "blazers": 28513, - "mcconnell": 28514, - "reeling": 28515, - "bot": 28516, - "este": 28517, - "##abi": 28518, - "geese": 28519, - "hoax": 28520, - "taxon": 28521, - "##bla": 28522, - "guitarists": 28523, - "##icon": 28524, - "condemning": 28525, - "hunts": 28526, - "inversion": 28527, - "moffat": 28528, - "taekwondo": 28529, - "##lvis": 28530, - "1624": 28531, - "stammered": 28532, - "##rest": 28533, - "##rzy": 28534, - "sousa": 28535, - "fundraiser": 28536, - "marylebone": 28537, - "navigable": 28538, - "uptown": 28539, - "cabbage": 28540, - "daniela": 28541, - "salman": 28542, - "shitty": 28543, - "whimper": 28544, - "##kian": 28545, - "##utive": 28546, - "programmers": 28547, - "protections": 28548, - "rm": 28549, - "##rmi": 28550, - "##rued": 28551, - "forceful": 28552, - "##enes": 28553, - "fuss": 28554, - "##tao": 28555, - "##wash": 28556, - "brat": 28557, - "oppressive": 28558, - "reykjavik": 28559, - "spartak": 28560, - "ticking": 28561, - "##inkles": 28562, - "##kiewicz": 28563, - "adolph": 28564, - "horst": 28565, - "maui": 28566, - "protege": 28567, - "straighten": 28568, - "cpc": 28569, - "landau": 28570, - "concourse": 28571, - "clements": 28572, - "resultant": 28573, - "##ando": 28574, - "imaginative": 28575, - "joo": 28576, - "reactivated": 28577, - "##rem": 28578, - "##ffled": 28579, - "##uising": 28580, - "consultative": 28581, - "##guide": 28582, - "flop": 28583, - "kaitlyn": 28584, - "mergers": 28585, - "parenting": 28586, - "somber": 28587, - "##vron": 28588, - "supervise": 28589, - "vidhan": 28590, - "##imum": 28591, - "courtship": 28592, - "exemplified": 28593, - "harmonies": 28594, - "medallist": 28595, - "refining": 28596, - "##rrow": 28597, - "##ка": 28598, - "amara": 28599, - "##hum": 28600, - "780": 28601, - "goalscorer": 28602, - "sited": 28603, - "overshadowed": 28604, - "rohan": 28605, - "displeasure": 28606, - "secretive": 28607, - "multiplied": 28608, - "osman": 28609, - "##orth": 28610, - "engravings": 28611, - "padre": 28612, - "##kali": 28613, - "##veda": 28614, - "miniatures": 28615, - "mis": 28616, - "##yala": 28617, - "clap": 28618, - "pali": 28619, - "rook": 28620, - "##cana": 28621, - "1692": 28622, - "57th": 28623, - "antennae": 28624, - "astro": 28625, - "oskar": 28626, - "1628": 28627, - "bulldog": 28628, - "crotch": 28629, - "hackett": 28630, - "yucatan": 28631, - "##sure": 28632, - "amplifiers": 28633, - "brno": 28634, - "ferrara": 28635, - "migrating": 28636, - "##gree": 28637, - "thanking": 28638, - "turing": 28639, - "##eza": 28640, - "mccann": 28641, - "ting": 28642, - "andersson": 28643, - "onslaught": 28644, - "gaines": 28645, - "ganga": 28646, - "incense": 28647, - "standardization": 28648, - "##mation": 28649, - "sentai": 28650, - 
"scuba": 28651, - "stuffing": 28652, - "turquoise": 28653, - "waivers": 28654, - "alloys": 28655, - "##vitt": 28656, - "regaining": 28657, - "vaults": 28658, - "##clops": 28659, - "##gizing": 28660, - "digger": 28661, - "furry": 28662, - "memorabilia": 28663, - "probing": 28664, - "##iad": 28665, - "payton": 28666, - "rec": 28667, - "deutschland": 28668, - "filippo": 28669, - "opaque": 28670, - "seamen": 28671, - "zenith": 28672, - "afrikaans": 28673, - "##filtration": 28674, - "disciplined": 28675, - "inspirational": 28676, - "##merie": 28677, - "banco": 28678, - "confuse": 28679, - "grafton": 28680, - "tod": 28681, - "##dgets": 28682, - "championed": 28683, - "simi": 28684, - "anomaly": 28685, - "biplane": 28686, - "##ceptive": 28687, - "electrode": 28688, - "##para": 28689, - "1697": 28690, - "cleavage": 28691, - "crossbow": 28692, - "swirl": 28693, - "informant": 28694, - "##lars": 28695, - "##osta": 28696, - "afi": 28697, - "bonfire": 28698, - "spec": 28699, - "##oux": 28700, - "lakeside": 28701, - "slump": 28702, - "##culus": 28703, - "##lais": 28704, - "##qvist": 28705, - "##rrigan": 28706, - "1016": 28707, - "facades": 28708, - "borg": 28709, - "inwardly": 28710, - "cervical": 28711, - "xl": 28712, - "pointedly": 28713, - "050": 28714, - "stabilization": 28715, - "##odon": 28716, - "chests": 28717, - "1699": 28718, - "hacked": 28719, - "ctv": 28720, - "orthogonal": 28721, - "suzy": 28722, - "##lastic": 28723, - "gaulle": 28724, - "jacobite": 28725, - "rearview": 28726, - "##cam": 28727, - "##erted": 28728, - "ashby": 28729, - "##drik": 28730, - "##igate": 28731, - "##mise": 28732, - "##zbek": 28733, - "affectionately": 28734, - "canine": 28735, - "disperse": 28736, - "latham": 28737, - "##istles": 28738, - "##ivar": 28739, - "spielberg": 28740, - "##orin": 28741, - "##idium": 28742, - "ezekiel": 28743, - "cid": 28744, - "##sg": 28745, - "durga": 28746, - "middletown": 28747, - "##cina": 28748, - "customized": 28749, - "frontiers": 28750, - "harden": 28751, - "##etano": 28752, - "##zzy": 28753, - "1604": 28754, - "bolsheviks": 28755, - "##66": 28756, - "coloration": 28757, - "yoko": 28758, - "##bedo": 28759, - "briefs": 28760, - "slabs": 28761, - "debra": 28762, - "liquidation": 28763, - "plumage": 28764, - "##oin": 28765, - "blossoms": 28766, - "dementia": 28767, - "subsidy": 28768, - "1611": 28769, - "proctor": 28770, - "relational": 28771, - "jerseys": 28772, - "parochial": 28773, - "ter": 28774, - "##ici": 28775, - "esa": 28776, - "peshawar": 28777, - "cavalier": 28778, - "loren": 28779, - "cpi": 28780, - "idiots": 28781, - "shamrock": 28782, - "1646": 28783, - "dutton": 28784, - "malabar": 28785, - "mustache": 28786, - "##endez": 28787, - "##ocytes": 28788, - "referencing": 28789, - "terminates": 28790, - "marche": 28791, - "yarmouth": 28792, - "##sop": 28793, - "acton": 28794, - "mated": 28795, - "seton": 28796, - "subtly": 28797, - "baptised": 28798, - "beige": 28799, - "extremes": 28800, - "jolted": 28801, - "kristina": 28802, - "telecast": 28803, - "##actic": 28804, - "safeguard": 28805, - "waldo": 28806, - "##baldi": 28807, - "##bular": 28808, - "endeavors": 28809, - "sloppy": 28810, - "subterranean": 28811, - "##ensburg": 28812, - "##itung": 28813, - "delicately": 28814, - "pigment": 28815, - "tq": 28816, - "##scu": 28817, - "1626": 28818, - "##ound": 28819, - "collisions": 28820, - "coveted": 28821, - "herds": 28822, - "##personal": 28823, - "##meister": 28824, - "##nberger": 28825, - "chopra": 28826, - "##ricting": 28827, - "abnormalities": 28828, - "defective": 28829, 
- "galician": 28830, - "lucie": 28831, - "##dilly": 28832, - "alligator": 28833, - "likened": 28834, - "##genase": 28835, - "burundi": 28836, - "clears": 28837, - "complexion": 28838, - "derelict": 28839, - "deafening": 28840, - "diablo": 28841, - "fingered": 28842, - "champaign": 28843, - "dogg": 28844, - "enlist": 28845, - "isotope": 28846, - "labeling": 28847, - "mrna": 28848, - "##erre": 28849, - "brilliance": 28850, - "marvelous": 28851, - "##ayo": 28852, - "1652": 28853, - "crawley": 28854, - "ether": 28855, - "footed": 28856, - "dwellers": 28857, - "deserts": 28858, - "hamish": 28859, - "rubs": 28860, - "warlock": 28861, - "skimmed": 28862, - "##lizer": 28863, - "870": 28864, - "buick": 28865, - "embark": 28866, - "heraldic": 28867, - "irregularities": 28868, - "##ajan": 28869, - "kiara": 28870, - "##kulam": 28871, - "##ieg": 28872, - "antigen": 28873, - "kowalski": 28874, - "##lge": 28875, - "oakley": 28876, - "visitation": 28877, - "##mbit": 28878, - "vt": 28879, - "##suit": 28880, - "1570": 28881, - "murderers": 28882, - "##miento": 28883, - "##rites": 28884, - "chimneys": 28885, - "##sling": 28886, - "condemn": 28887, - "custer": 28888, - "exchequer": 28889, - "havre": 28890, - "##ghi": 28891, - "fluctuations": 28892, - "##rations": 28893, - "dfb": 28894, - "hendricks": 28895, - "vaccines": 28896, - "##tarian": 28897, - "nietzsche": 28898, - "biking": 28899, - "juicy": 28900, - "##duced": 28901, - "brooding": 28902, - "scrolling": 28903, - "selangor": 28904, - "##ragan": 28905, - "352": 28906, - "annum": 28907, - "boomed": 28908, - "seminole": 28909, - "sugarcane": 28910, - "##dna": 28911, - "departmental": 28912, - "dismissing": 28913, - "innsbruck": 28914, - "arteries": 28915, - "ashok": 28916, - "batavia": 28917, - "daze": 28918, - "kun": 28919, - "overtook": 28920, - "##rga": 28921, - "##tlan": 28922, - "beheaded": 28923, - "gaddafi": 28924, - "holm": 28925, - "electronically": 28926, - "faulty": 28927, - "galilee": 28928, - "fractures": 28929, - "kobayashi": 28930, - "##lized": 28931, - "gunmen": 28932, - "magma": 28933, - "aramaic": 28934, - "mala": 28935, - "eastenders": 28936, - "inference": 28937, - "messengers": 28938, - "bf": 28939, - "##qu": 28940, - "407": 28941, - "bathrooms": 28942, - "##vere": 28943, - "1658": 28944, - "flashbacks": 28945, - "ideally": 28946, - "misunderstood": 28947, - "##jali": 28948, - "##weather": 28949, - "mendez": 28950, - "##grounds": 28951, - "505": 28952, - "uncanny": 28953, - "##iii": 28954, - "1709": 28955, - "friendships": 28956, - "##nbc": 28957, - "sacrament": 28958, - "accommodated": 28959, - "reiterated": 28960, - "logistical": 28961, - "pebbles": 28962, - "thumped": 28963, - "##escence": 28964, - "administering": 28965, - "decrees": 28966, - "drafts": 28967, - "##flight": 28968, - "##cased": 28969, - "##tula": 28970, - "futuristic": 28971, - "picket": 28972, - "intimidation": 28973, - "winthrop": 28974, - "##fahan": 28975, - "interfered": 28976, - "339": 28977, - "afar": 28978, - "francoise": 28979, - "morally": 28980, - "uta": 28981, - "cochin": 28982, - "croft": 28983, - "dwarfs": 28984, - "##bruck": 28985, - "##dents": 28986, - "##nami": 28987, - "biker": 28988, - "##hner": 28989, - "##meral": 28990, - "nano": 28991, - "##isen": 28992, - "##ometric": 28993, - "##pres": 28994, - "##ан": 28995, - "brightened": 28996, - "meek": 28997, - "parcels": 28998, - "securely": 28999, - "gunners": 29000, - "##jhl": 29001, - "##zko": 29002, - "agile": 29003, - "hysteria": 29004, - "##lten": 29005, - "##rcus": 29006, - "bukit": 29007, - 
"champs": 29008, - "chevy": 29009, - "cuckoo": 29010, - "leith": 29011, - "sadler": 29012, - "theologians": 29013, - "welded": 29014, - "##section": 29015, - "1663": 29016, - "jj": 29017, - "plurality": 29018, - "xander": 29019, - "##rooms": 29020, - "##formed": 29021, - "shredded": 29022, - "temps": 29023, - "intimately": 29024, - "pau": 29025, - "tormented": 29026, - "##lok": 29027, - "##stellar": 29028, - "1618": 29029, - "charred": 29030, - "ems": 29031, - "essen": 29032, - "##mmel": 29033, - "alarms": 29034, - "spraying": 29035, - "ascot": 29036, - "blooms": 29037, - "twinkle": 29038, - "##abia": 29039, - "##apes": 29040, - "internment": 29041, - "obsidian": 29042, - "##chaft": 29043, - "snoop": 29044, - "##dav": 29045, - "##ooping": 29046, - "malibu": 29047, - "##tension": 29048, - "quiver": 29049, - "##itia": 29050, - "hays": 29051, - "mcintosh": 29052, - "travers": 29053, - "walsall": 29054, - "##ffie": 29055, - "1623": 29056, - "beverley": 29057, - "schwarz": 29058, - "plunging": 29059, - "structurally": 29060, - "m3": 29061, - "rosenthal": 29062, - "vikram": 29063, - "##tsk": 29064, - "770": 29065, - "ghz": 29066, - "##onda": 29067, - "##tiv": 29068, - "chalmers": 29069, - "groningen": 29070, - "pew": 29071, - "reckon": 29072, - "unicef": 29073, - "##rvis": 29074, - "55th": 29075, - "##gni": 29076, - "1651": 29077, - "sulawesi": 29078, - "avila": 29079, - "cai": 29080, - "metaphysical": 29081, - "screwing": 29082, - "turbulence": 29083, - "##mberg": 29084, - "augusto": 29085, - "samba": 29086, - "56th": 29087, - "baffled": 29088, - "momentary": 29089, - "toxin": 29090, - "##urian": 29091, - "##wani": 29092, - "aachen": 29093, - "condoms": 29094, - "dali": 29095, - "steppe": 29096, - "##3d": 29097, - "##app": 29098, - "##oed": 29099, - "##year": 29100, - "adolescence": 29101, - "dauphin": 29102, - "electrically": 29103, - "inaccessible": 29104, - "microscopy": 29105, - "nikita": 29106, - "##ega": 29107, - "atv": 29108, - "##cel": 29109, - "##enter": 29110, - "##oles": 29111, - "##oteric": 29112, - "##ы": 29113, - "accountants": 29114, - "punishments": 29115, - "wrongly": 29116, - "bribes": 29117, - "adventurous": 29118, - "clinch": 29119, - "flinders": 29120, - "southland": 29121, - "##hem": 29122, - "##kata": 29123, - "gough": 29124, - "##ciency": 29125, - "lads": 29126, - "soared": 29127, - "##ה": 29128, - "undergoes": 29129, - "deformation": 29130, - "outlawed": 29131, - "rubbish": 29132, - "##arus": 29133, - "##mussen": 29134, - "##nidae": 29135, - "##rzburg": 29136, - "arcs": 29137, - "##ingdon": 29138, - "##tituted": 29139, - "1695": 29140, - "wheelbase": 29141, - "wheeling": 29142, - "bombardier": 29143, - "campground": 29144, - "zebra": 29145, - "##lices": 29146, - "##oj": 29147, - "##bain": 29148, - "lullaby": 29149, - "##ecure": 29150, - "donetsk": 29151, - "wylie": 29152, - "grenada": 29153, - "##arding": 29154, - "##ης": 29155, - "squinting": 29156, - "eireann": 29157, - "opposes": 29158, - "##andra": 29159, - "maximal": 29160, - "runes": 29161, - "##broken": 29162, - "##cuting": 29163, - "##iface": 29164, - "##ror": 29165, - "##rosis": 29166, - "additive": 29167, - "britney": 29168, - "adultery": 29169, - "triggering": 29170, - "##drome": 29171, - "detrimental": 29172, - "aarhus": 29173, - "containment": 29174, - "jc": 29175, - "swapped": 29176, - "vichy": 29177, - "##ioms": 29178, - "madly": 29179, - "##oric": 29180, - "##rag": 29181, - "brant": 29182, - "##ckey": 29183, - "##trix": 29184, - "1560": 29185, - "1612": 29186, - "broughton": 29187, - "rustling": 29188, - 
"##stems": 29189, - "##uder": 29190, - "asbestos": 29191, - "mentoring": 29192, - "##nivorous": 29193, - "finley": 29194, - "leaps": 29195, - "##isan": 29196, - "apical": 29197, - "pry": 29198, - "slits": 29199, - "substitutes": 29200, - "##dict": 29201, - "intuitive": 29202, - "fantasia": 29203, - "insistent": 29204, - "unreasonable": 29205, - "##igen": 29206, - "##vna": 29207, - "domed": 29208, - "hannover": 29209, - "margot": 29210, - "ponder": 29211, - "##zziness": 29212, - "impromptu": 29213, - "jian": 29214, - "lc": 29215, - "rampage": 29216, - "stemming": 29217, - "##eft": 29218, - "andrey": 29219, - "gerais": 29220, - "whichever": 29221, - "amnesia": 29222, - "appropriated": 29223, - "anzac": 29224, - "clicks": 29225, - "modifying": 29226, - "ultimatum": 29227, - "cambrian": 29228, - "maids": 29229, - "verve": 29230, - "yellowstone": 29231, - "##mbs": 29232, - "conservatoire": 29233, - "##scribe": 29234, - "adherence": 29235, - "dinners": 29236, - "spectra": 29237, - "imperfect": 29238, - "mysteriously": 29239, - "sidekick": 29240, - "tatar": 29241, - "tuba": 29242, - "##aks": 29243, - "##ifolia": 29244, - "distrust": 29245, - "##athan": 29246, - "##zle": 29247, - "c2": 29248, - "ronin": 29249, - "zac": 29250, - "##pse": 29251, - "celaena": 29252, - "instrumentalist": 29253, - "scents": 29254, - "skopje": 29255, - "##mbling": 29256, - "comical": 29257, - "compensated": 29258, - "vidal": 29259, - "condor": 29260, - "intersect": 29261, - "jingle": 29262, - "wavelengths": 29263, - "##urrent": 29264, - "mcqueen": 29265, - "##izzly": 29266, - "carp": 29267, - "weasel": 29268, - "422": 29269, - "kanye": 29270, - "militias": 29271, - "postdoctoral": 29272, - "eugen": 29273, - "gunslinger": 29274, - "##ɛ": 29275, - "faux": 29276, - "hospice": 29277, - "##for": 29278, - "appalled": 29279, - "derivation": 29280, - "dwarves": 29281, - "##elis": 29282, - "dilapidated": 29283, - "##folk": 29284, - "astoria": 29285, - "philology": 29286, - "##lwyn": 29287, - "##otho": 29288, - "##saka": 29289, - "inducing": 29290, - "philanthropy": 29291, - "##bf": 29292, - "##itative": 29293, - "geek": 29294, - "markedly": 29295, - "sql": 29296, - "##yce": 29297, - "bessie": 29298, - "indices": 29299, - "rn": 29300, - "##flict": 29301, - "495": 29302, - "frowns": 29303, - "resolving": 29304, - "weightlifting": 29305, - "tugs": 29306, - "cleric": 29307, - "contentious": 29308, - "1653": 29309, - "mania": 29310, - "rms": 29311, - "##miya": 29312, - "##reate": 29313, - "##ruck": 29314, - "##tucket": 29315, - "bien": 29316, - "eels": 29317, - "marek": 29318, - "##ayton": 29319, - "##cence": 29320, - "discreet": 29321, - "unofficially": 29322, - "##ife": 29323, - "leaks": 29324, - "##bber": 29325, - "1705": 29326, - "332": 29327, - "dung": 29328, - "compressor": 29329, - "hillsborough": 29330, - "pandit": 29331, - "shillings": 29332, - "distal": 29333, - "##skin": 29334, - "381": 29335, - "##tat": 29336, - "##you": 29337, - "nosed": 29338, - "##nir": 29339, - "mangrove": 29340, - "undeveloped": 29341, - "##idia": 29342, - "textures": 29343, - "##inho": 29344, - "##500": 29345, - "##rise": 29346, - "ae": 29347, - "irritating": 29348, - "nay": 29349, - "amazingly": 29350, - "bancroft": 29351, - "apologetic": 29352, - "compassionate": 29353, - "kata": 29354, - "symphonies": 29355, - "##lovic": 29356, - "airspace": 29357, - "##lch": 29358, - "930": 29359, - "gifford": 29360, - "precautions": 29361, - "fulfillment": 29362, - "sevilla": 29363, - "vulgar": 29364, - "martinique": 29365, - "##urities": 29366, - "looting": 
29367, - "piccolo": 29368, - "tidy": 29369, - "##dermott": 29370, - "quadrant": 29371, - "armchair": 29372, - "incomes": 29373, - "mathematicians": 29374, - "stampede": 29375, - "nilsson": 29376, - "##inking": 29377, - "##scan": 29378, - "foo": 29379, - "quarterfinal": 29380, - "##ostal": 29381, - "shang": 29382, - "shouldered": 29383, - "squirrels": 29384, - "##owe": 29385, - "344": 29386, - "vinegar": 29387, - "##bner": 29388, - "##rchy": 29389, - "##systems": 29390, - "delaying": 29391, - "##trics": 29392, - "ars": 29393, - "dwyer": 29394, - "rhapsody": 29395, - "sponsoring": 29396, - "##gration": 29397, - "bipolar": 29398, - "cinder": 29399, - "starters": 29400, - "##olio": 29401, - "##urst": 29402, - "421": 29403, - "signage": 29404, - "##nty": 29405, - "aground": 29406, - "figurative": 29407, - "mons": 29408, - "acquaintances": 29409, - "duets": 29410, - "erroneously": 29411, - "soyuz": 29412, - "elliptic": 29413, - "recreated": 29414, - "##cultural": 29415, - "##quette": 29416, - "##ssed": 29417, - "##tma": 29418, - "##zcz": 29419, - "moderator": 29420, - "scares": 29421, - "##itaire": 29422, - "##stones": 29423, - "##udence": 29424, - "juniper": 29425, - "sighting": 29426, - "##just": 29427, - "##nsen": 29428, - "britten": 29429, - "calabria": 29430, - "ry": 29431, - "bop": 29432, - "cramer": 29433, - "forsyth": 29434, - "stillness": 29435, - "##л": 29436, - "airmen": 29437, - "gathers": 29438, - "unfit": 29439, - "##umber": 29440, - "##upt": 29441, - "taunting": 29442, - "##rip": 29443, - "seeker": 29444, - "streamlined": 29445, - "##bution": 29446, - "holster": 29447, - "schumann": 29448, - "tread": 29449, - "vox": 29450, - "##gano": 29451, - "##onzo": 29452, - "strive": 29453, - "dil": 29454, - "reforming": 29455, - "covent": 29456, - "newbury": 29457, - "predicting": 29458, - "##orro": 29459, - "decorate": 29460, - "tre": 29461, - "##puted": 29462, - "andover": 29463, - "ie": 29464, - "asahi": 29465, - "dept": 29466, - "dunkirk": 29467, - "gills": 29468, - "##tori": 29469, - "buren": 29470, - "huskies": 29471, - "##stis": 29472, - "##stov": 29473, - "abstracts": 29474, - "bets": 29475, - "loosen": 29476, - "##opa": 29477, - "1682": 29478, - "yearning": 29479, - "##glio": 29480, - "##sir": 29481, - "berman": 29482, - "effortlessly": 29483, - "enamel": 29484, - "napoli": 29485, - "persist": 29486, - "##peration": 29487, - "##uez": 29488, - "attache": 29489, - "elisa": 29490, - "b1": 29491, - "invitations": 29492, - "##kic": 29493, - "accelerating": 29494, - "reindeer": 29495, - "boardwalk": 29496, - "clutches": 29497, - "nelly": 29498, - "polka": 29499, - "starbucks": 29500, - "##kei": 29501, - "adamant": 29502, - "huey": 29503, - "lough": 29504, - "unbroken": 29505, - "adventurer": 29506, - "embroidery": 29507, - "inspecting": 29508, - "stanza": 29509, - "##ducted": 29510, - "naia": 29511, - "taluka": 29512, - "##pone": 29513, - "##roids": 29514, - "chases": 29515, - "deprivation": 29516, - "florian": 29517, - "##jing": 29518, - "##ppet": 29519, - "earthly": 29520, - "##lib": 29521, - "##ssee": 29522, - "colossal": 29523, - "foreigner": 29524, - "vet": 29525, - "freaks": 29526, - "patrice": 29527, - "rosewood": 29528, - "triassic": 29529, - "upstate": 29530, - "##pkins": 29531, - "dominates": 29532, - "ata": 29533, - "chants": 29534, - "ks": 29535, - "vo": 29536, - "##400": 29537, - "##bley": 29538, - "##raya": 29539, - "##rmed": 29540, - "555": 29541, - "agra": 29542, - "infiltrate": 29543, - "##ailing": 29544, - "##ilation": 29545, - "##tzer": 29546, - "##uppe": 29547, - 
"##werk": 29548, - "binoculars": 29549, - "enthusiast": 29550, - "fujian": 29551, - "squeak": 29552, - "##avs": 29553, - "abolitionist": 29554, - "almeida": 29555, - "boredom": 29556, - "hampstead": 29557, - "marsden": 29558, - "rations": 29559, - "##ands": 29560, - "inflated": 29561, - "334": 29562, - "bonuses": 29563, - "rosalie": 29564, - "patna": 29565, - "##rco": 29566, - "329": 29567, - "detachments": 29568, - "penitentiary": 29569, - "54th": 29570, - "flourishing": 29571, - "woolf": 29572, - "##dion": 29573, - "##etched": 29574, - "papyrus": 29575, - "##lster": 29576, - "##nsor": 29577, - "##toy": 29578, - "bobbed": 29579, - "dismounted": 29580, - "endelle": 29581, - "inhuman": 29582, - "motorola": 29583, - "tbs": 29584, - "wince": 29585, - "wreath": 29586, - "##ticus": 29587, - "hideout": 29588, - "inspections": 29589, - "sanjay": 29590, - "disgrace": 29591, - "infused": 29592, - "pudding": 29593, - "stalks": 29594, - "##urbed": 29595, - "arsenic": 29596, - "leases": 29597, - "##hyl": 29598, - "##rrard": 29599, - "collarbone": 29600, - "##waite": 29601, - "##wil": 29602, - "dowry": 29603, - "##bant": 29604, - "##edance": 29605, - "genealogical": 29606, - "nitrate": 29607, - "salamanca": 29608, - "scandals": 29609, - "thyroid": 29610, - "necessitated": 29611, - "##!": 29612, - "##\"": 29613, - "###": 29614, - "##$": 29615, - "##%": 29616, - "##&": 29617, - "##'": 29618, - "##(": 29619, - "##)": 29620, - "##*": 29621, - "##+": 29622, - "##,": 29623, - "##-": 29624, - "##.": 29625, - "##/": 29626, - "##:": 29627, - "##;": 29628, - "##<": 29629, - "##=": 29630, - "##>": 29631, - "##?": 29632, - "##@": 29633, - "##[": 29634, - "##\\": 29635, - "##]": 29636, - "##^": 29637, - "##_": 29638, - "##`": 29639, - "##{": 29640, - "##|": 29641, - "##}": 29642, - "##~": 29643, - "##¡": 29644, - "##¢": 29645, - "##£": 29646, - "##¤": 29647, - "##¥": 29648, - "##¦": 29649, - "##§": 29650, - "##¨": 29651, - "##©": 29652, - "##ª": 29653, - "##«": 29654, - "##¬": 29655, - "##®": 29656, - "##±": 29657, - "##´": 29658, - "##µ": 29659, - "##¶": 29660, - "##·": 29661, - "##º": 29662, - "##»": 29663, - "##¼": 29664, - "##¾": 29665, - "##¿": 29666, - "##æ": 29667, - "##ð": 29668, - "##÷": 29669, - "##þ": 29670, - "##đ": 29671, - "##ħ": 29672, - "##ŋ": 29673, - "##œ": 29674, - "##ƒ": 29675, - "##ɐ": 29676, - "##ɑ": 29677, - "##ɒ": 29678, - "##ɔ": 29679, - "##ɕ": 29680, - "##ə": 29681, - "##ɡ": 29682, - "##ɣ": 29683, - "##ɨ": 29684, - "##ɪ": 29685, - "##ɫ": 29686, - "##ɬ": 29687, - "##ɯ": 29688, - "##ɲ": 29689, - "##ɴ": 29690, - "##ɹ": 29691, - "##ɾ": 29692, - "##ʀ": 29693, - "##ʁ": 29694, - "##ʂ": 29695, - "##ʃ": 29696, - "##ʉ": 29697, - "##ʊ": 29698, - "##ʋ": 29699, - "##ʌ": 29700, - "##ʎ": 29701, - "##ʐ": 29702, - "##ʑ": 29703, - "##ʒ": 29704, - "##ʔ": 29705, - "##ʰ": 29706, - "##ʲ": 29707, - "##ʳ": 29708, - "##ʷ": 29709, - "##ʸ": 29710, - "##ʻ": 29711, - "##ʼ": 29712, - "##ʾ": 29713, - "##ʿ": 29714, - "##ˈ": 29715, - "##ˡ": 29716, - "##ˢ": 29717, - "##ˣ": 29718, - "##ˤ": 29719, - "##β": 29720, - "##γ": 29721, - "##δ": 29722, - "##ε": 29723, - "##ζ": 29724, - "##θ": 29725, - "##κ": 29726, - "##λ": 29727, - "##μ": 29728, - "##ξ": 29729, - "##ο": 29730, - "##π": 29731, - "##ρ": 29732, - "##σ": 29733, - "##τ": 29734, - "##υ": 29735, - "##φ": 29736, - "##χ": 29737, - "##ψ": 29738, - "##ω": 29739, - "##б": 29740, - "##г": 29741, - "##д": 29742, - "##ж": 29743, - "##з": 29744, - "##м": 29745, - "##п": 29746, - "##с": 29747, - "##у": 29748, - "##ф": 29749, - "##х": 29750, - "##ц": 29751, - "##ч": 29752, - "##ш": 
29753, - "##щ": 29754, - "##ъ": 29755, - "##э": 29756, - "##ю": 29757, - "##ђ": 29758, - "##є": 29759, - "##і": 29760, - "##ј": 29761, - "##љ": 29762, - "##њ": 29763, - "##ћ": 29764, - "##ӏ": 29765, - "##ա": 29766, - "##բ": 29767, - "##գ": 29768, - "##դ": 29769, - "##ե": 29770, - "##թ": 29771, - "##ի": 29772, - "##լ": 29773, - "##կ": 29774, - "##հ": 29775, - "##մ": 29776, - "##յ": 29777, - "##ն": 29778, - "##ո": 29779, - "##պ": 29780, - "##ս": 29781, - "##վ": 29782, - "##տ": 29783, - "##ր": 29784, - "##ւ": 29785, - "##ք": 29786, - "##־": 29787, - "##א": 29788, - "##ב": 29789, - "##ג": 29790, - "##ד": 29791, - "##ו": 29792, - "##ז": 29793, - "##ח": 29794, - "##ט": 29795, - "##י": 29796, - "##ך": 29797, - "##כ": 29798, - "##ל": 29799, - "##ם": 29800, - "##מ": 29801, - "##ן": 29802, - "##נ": 29803, - "##ס": 29804, - "##ע": 29805, - "##ף": 29806, - "##פ": 29807, - "##ץ": 29808, - "##צ": 29809, - "##ק": 29810, - "##ר": 29811, - "##ש": 29812, - "##ת": 29813, - "##،": 29814, - "##ء": 29815, - "##ب": 29816, - "##ت": 29817, - "##ث": 29818, - "##ج": 29819, - "##ح": 29820, - "##خ": 29821, - "##ذ": 29822, - "##ز": 29823, - "##س": 29824, - "##ش": 29825, - "##ص": 29826, - "##ض": 29827, - "##ط": 29828, - "##ظ": 29829, - "##ع": 29830, - "##غ": 29831, - "##ـ": 29832, - "##ف": 29833, - "##ق": 29834, - "##ك": 29835, - "##و": 29836, - "##ى": 29837, - "##ٹ": 29838, - "##پ": 29839, - "##چ": 29840, - "##ک": 29841, - "##گ": 29842, - "##ں": 29843, - "##ھ": 29844, - "##ہ": 29845, - "##ے": 29846, - "##अ": 29847, - "##आ": 29848, - "##उ": 29849, - "##ए": 29850, - "##क": 29851, - "##ख": 29852, - "##ग": 29853, - "##च": 29854, - "##ज": 29855, - "##ट": 29856, - "##ड": 29857, - "##ण": 29858, - "##त": 29859, - "##थ": 29860, - "##द": 29861, - "##ध": 29862, - "##न": 29863, - "##प": 29864, - "##ब": 29865, - "##भ": 29866, - "##म": 29867, - "##य": 29868, - "##र": 29869, - "##ल": 29870, - "##व": 29871, - "##श": 29872, - "##ष": 29873, - "##स": 29874, - "##ह": 29875, - "##ा": 29876, - "##ि": 29877, - "##ी": 29878, - "##ो": 29879, - "##।": 29880, - "##॥": 29881, - "##ং": 29882, - "##অ": 29883, - "##আ": 29884, - "##ই": 29885, - "##উ": 29886, - "##এ": 29887, - "##ও": 29888, - "##ক": 29889, - "##খ": 29890, - "##গ": 29891, - "##চ": 29892, - "##ছ": 29893, - "##জ": 29894, - "##ট": 29895, - "##ড": 29896, - "##ণ": 29897, - "##ত": 29898, - "##থ": 29899, - "##দ": 29900, - "##ধ": 29901, - "##ন": 29902, - "##প": 29903, - "##ব": 29904, - "##ভ": 29905, - "##ম": 29906, - "##য": 29907, - "##র": 29908, - "##ল": 29909, - "##শ": 29910, - "##ষ": 29911, - "##স": 29912, - "##হ": 29913, - "##া": 29914, - "##ি": 29915, - "##ী": 29916, - "##ে": 29917, - "##க": 29918, - "##ச": 29919, - "##ட": 29920, - "##த": 29921, - "##ந": 29922, - "##ன": 29923, - "##ப": 29924, - "##ம": 29925, - "##ய": 29926, - "##ர": 29927, - "##ல": 29928, - "##ள": 29929, - "##வ": 29930, - "##ா": 29931, - "##ி": 29932, - "##ு": 29933, - "##ே": 29934, - "##ை": 29935, - "##ನ": 29936, - "##ರ": 29937, - "##ಾ": 29938, - "##ක": 29939, - "##ය": 29940, - "##ර": 29941, - "##ල": 29942, - "##ව": 29943, - "##ා": 29944, - "##ก": 29945, - "##ง": 29946, - "##ต": 29947, - "##ท": 29948, - "##น": 29949, - "##พ": 29950, - "##ม": 29951, - "##ย": 29952, - "##ร": 29953, - "##ล": 29954, - "##ว": 29955, - "##ส": 29956, - "##อ": 29957, - "##า": 29958, - "##เ": 29959, - "##་": 29960, - "##།": 29961, - "##ག": 29962, - "##ང": 29963, - "##ད": 29964, - "##ན": 29965, - "##པ": 29966, - "##བ": 29967, - "##མ": 29968, - "##འ": 29969, - "##ར": 29970, - "##ལ": 29971, - "##ས": 29972, - "##မ": 29973, - "##ა": 29974, - "##ბ": 
29975, - "##გ": 29976, - "##დ": 29977, - "##ე": 29978, - "##ვ": 29979, - "##თ": 29980, - "##ი": 29981, - "##კ": 29982, - "##ლ": 29983, - "##მ": 29984, - "##ნ": 29985, - "##ო": 29986, - "##რ": 29987, - "##ს": 29988, - "##ტ": 29989, - "##უ": 29990, - "##ᄀ": 29991, - "##ᄂ": 29992, - "##ᄃ": 29993, - "##ᄅ": 29994, - "##ᄆ": 29995, - "##ᄇ": 29996, - "##ᄉ": 29997, - "##ᄊ": 29998, - "##ᄋ": 29999, - "##ᄌ": 30000, - "##ᄎ": 30001, - "##ᄏ": 30002, - "##ᄐ": 30003, - "##ᄑ": 30004, - "##ᄒ": 30005, - "##ᅡ": 30006, - "##ᅢ": 30007, - "##ᅥ": 30008, - "##ᅦ": 30009, - "##ᅧ": 30010, - "##ᅩ": 30011, - "##ᅪ": 30012, - "##ᅭ": 30013, - "##ᅮ": 30014, - "##ᅯ": 30015, - "##ᅲ": 30016, - "##ᅳ": 30017, - "##ᅴ": 30018, - "##ᅵ": 30019, - "##ᆨ": 30020, - "##ᆫ": 30021, - "##ᆯ": 30022, - "##ᆷ": 30023, - "##ᆸ": 30024, - "##ᆼ": 30025, - "##ᴬ": 30026, - "##ᴮ": 30027, - "##ᴰ": 30028, - "##ᴵ": 30029, - "##ᴺ": 30030, - "##ᵀ": 30031, - "##ᵃ": 30032, - "##ᵇ": 30033, - "##ᵈ": 30034, - "##ᵉ": 30035, - "##ᵍ": 30036, - "##ᵏ": 30037, - "##ᵐ": 30038, - "##ᵒ": 30039, - "##ᵖ": 30040, - "##ᵗ": 30041, - "##ᵘ": 30042, - "##ᵣ": 30043, - "##ᵤ": 30044, - "##ᵥ": 30045, - "##ᶜ": 30046, - "##ᶠ": 30047, - "##‐": 30048, - "##‑": 30049, - "##‒": 30050, - "##–": 30051, - "##—": 30052, - "##―": 30053, - "##‖": 30054, - "##‘": 30055, - "##’": 30056, - "##‚": 30057, - "##“": 30058, - "##”": 30059, - "##„": 30060, - "##†": 30061, - "##‡": 30062, - "##•": 30063, - "##…": 30064, - "##‰": 30065, - "##′": 30066, - "##″": 30067, - "##›": 30068, - "##‿": 30069, - "##⁄": 30070, - "##⁰": 30071, - "##ⁱ": 30072, - "##⁴": 30073, - "##⁵": 30074, - "##⁶": 30075, - "##⁷": 30076, - "##⁸": 30077, - "##⁹": 30078, - "##⁻": 30079, - "##ⁿ": 30080, - "##₅": 30081, - "##₆": 30082, - "##₇": 30083, - "##₈": 30084, - "##₉": 30085, - "##₊": 30086, - "##₍": 30087, - "##₎": 30088, - "##ₐ": 30089, - "##ₑ": 30090, - "##ₒ": 30091, - "##ₓ": 30092, - "##ₕ": 30093, - "##ₖ": 30094, - "##ₗ": 30095, - "##ₘ": 30096, - "##ₚ": 30097, - "##ₛ": 30098, - "##ₜ": 30099, - "##₤": 30100, - "##₩": 30101, - "##€": 30102, - "##₱": 30103, - "##₹": 30104, - "##ℓ": 30105, - "##№": 30106, - "##ℝ": 30107, - "##™": 30108, - "##⅓": 30109, - "##⅔": 30110, - "##←": 30111, - "##↑": 30112, - "##→": 30113, - "##↓": 30114, - "##↔": 30115, - "##↦": 30116, - "##⇄": 30117, - "##⇌": 30118, - "##⇒": 30119, - "##∂": 30120, - "##∅": 30121, - "##∆": 30122, - "##∇": 30123, - "##∈": 30124, - "##∗": 30125, - "##∘": 30126, - "##√": 30127, - "##∞": 30128, - "##∧": 30129, - "##∨": 30130, - "##∩": 30131, - "##∪": 30132, - "##≈": 30133, - "##≡": 30134, - "##≤": 30135, - "##≥": 30136, - "##⊂": 30137, - "##⊆": 30138, - "##⊕": 30139, - "##⊗": 30140, - "##⋅": 30141, - "##─": 30142, - "##│": 30143, - "##■": 30144, - "##▪": 30145, - "##●": 30146, - "##★": 30147, - "##☆": 30148, - "##☉": 30149, - "##♠": 30150, - "##♣": 30151, - "##♥": 30152, - "##♦": 30153, - "##♯": 30154, - "##⟨": 30155, - "##⟩": 30156, - "##ⱼ": 30157, - "##⺩": 30158, - "##⺼": 30159, - "##⽥": 30160, - "##、": 30161, - "##。": 30162, - "##〈": 30163, - "##〉": 30164, - "##《": 30165, - "##》": 30166, - "##「": 30167, - "##」": 30168, - "##『": 30169, - "##』": 30170, - "##〜": 30171, - "##あ": 30172, - "##い": 30173, - "##う": 30174, - "##え": 30175, - "##お": 30176, - "##か": 30177, - "##き": 30178, - "##く": 30179, - "##け": 30180, - "##こ": 30181, - "##さ": 30182, - "##し": 30183, - "##す": 30184, - "##せ": 30185, - "##そ": 30186, - "##た": 30187, - "##ち": 30188, - "##っ": 30189, - "##つ": 30190, - "##て": 30191, - "##と": 30192, - "##な": 30193, - "##に": 30194, - "##ぬ": 30195, - "##ね": 30196, - "##の": 
30197, - "##は": 30198, - "##ひ": 30199, - "##ふ": 30200, - "##へ": 30201, - "##ほ": 30202, - "##ま": 30203, - "##み": 30204, - "##む": 30205, - "##め": 30206, - "##も": 30207, - "##や": 30208, - "##ゆ": 30209, - "##よ": 30210, - "##ら": 30211, - "##り": 30212, - "##る": 30213, - "##れ": 30214, - "##ろ": 30215, - "##を": 30216, - "##ん": 30217, - "##ァ": 30218, - "##ア": 30219, - "##ィ": 30220, - "##イ": 30221, - "##ウ": 30222, - "##ェ": 30223, - "##エ": 30224, - "##オ": 30225, - "##カ": 30226, - "##キ": 30227, - "##ク": 30228, - "##ケ": 30229, - "##コ": 30230, - "##サ": 30231, - "##シ": 30232, - "##ス": 30233, - "##セ": 30234, - "##タ": 30235, - "##チ": 30236, - "##ッ": 30237, - "##ツ": 30238, - "##テ": 30239, - "##ト": 30240, - "##ナ": 30241, - "##ニ": 30242, - "##ノ": 30243, - "##ハ": 30244, - "##ヒ": 30245, - "##フ": 30246, - "##ヘ": 30247, - "##ホ": 30248, - "##マ": 30249, - "##ミ": 30250, - "##ム": 30251, - "##メ": 30252, - "##モ": 30253, - "##ャ": 30254, - "##ュ": 30255, - "##ョ": 30256, - "##ラ": 30257, - "##リ": 30258, - "##ル": 30259, - "##レ": 30260, - "##ロ": 30261, - "##ワ": 30262, - "##ン": 30263, - "##・": 30264, - "##ー": 30265, - "##一": 30266, - "##三": 30267, - "##上": 30268, - "##下": 30269, - "##不": 30270, - "##世": 30271, - "##中": 30272, - "##主": 30273, - "##久": 30274, - "##之": 30275, - "##也": 30276, - "##事": 30277, - "##二": 30278, - "##五": 30279, - "##井": 30280, - "##京": 30281, - "##人": 30282, - "##亻": 30283, - "##仁": 30284, - "##介": 30285, - "##代": 30286, - "##仮": 30287, - "##伊": 30288, - "##会": 30289, - "##佐": 30290, - "##侍": 30291, - "##保": 30292, - "##信": 30293, - "##健": 30294, - "##元": 30295, - "##光": 30296, - "##八": 30297, - "##公": 30298, - "##内": 30299, - "##出": 30300, - "##分": 30301, - "##前": 30302, - "##劉": 30303, - "##力": 30304, - "##加": 30305, - "##勝": 30306, - "##北": 30307, - "##区": 30308, - "##十": 30309, - "##千": 30310, - "##南": 30311, - "##博": 30312, - "##原": 30313, - "##口": 30314, - "##古": 30315, - "##史": 30316, - "##司": 30317, - "##合": 30318, - "##吉": 30319, - "##同": 30320, - "##名": 30321, - "##和": 30322, - "##囗": 30323, - "##四": 30324, - "##国": 30325, - "##國": 30326, - "##土": 30327, - "##地": 30328, - "##坂": 30329, - "##城": 30330, - "##堂": 30331, - "##場": 30332, - "##士": 30333, - "##夏": 30334, - "##外": 30335, - "##大": 30336, - "##天": 30337, - "##太": 30338, - "##夫": 30339, - "##奈": 30340, - "##女": 30341, - "##子": 30342, - "##学": 30343, - "##宀": 30344, - "##宇": 30345, - "##安": 30346, - "##宗": 30347, - "##定": 30348, - "##宣": 30349, - "##宮": 30350, - "##家": 30351, - "##宿": 30352, - "##寺": 30353, - "##將": 30354, - "##小": 30355, - "##尚": 30356, - "##山": 30357, - "##岡": 30358, - "##島": 30359, - "##崎": 30360, - "##川": 30361, - "##州": 30362, - "##巿": 30363, - "##帝": 30364, - "##平": 30365, - "##年": 30366, - "##幸": 30367, - "##广": 30368, - "##弘": 30369, - "##張": 30370, - "##彳": 30371, - "##後": 30372, - "##御": 30373, - "##德": 30374, - "##心": 30375, - "##忄": 30376, - "##志": 30377, - "##忠": 30378, - "##愛": 30379, - "##成": 30380, - "##我": 30381, - "##戦": 30382, - "##戸": 30383, - "##手": 30384, - "##扌": 30385, - "##政": 30386, - "##文": 30387, - "##新": 30388, - "##方": 30389, - "##日": 30390, - "##明": 30391, - "##星": 30392, - "##春": 30393, - "##昭": 30394, - "##智": 30395, - "##曲": 30396, - "##書": 30397, - "##月": 30398, - "##有": 30399, - "##朝": 30400, - "##木": 30401, - "##本": 30402, - "##李": 30403, - "##村": 30404, - "##東": 30405, - "##松": 30406, - "##林": 30407, - "##森": 30408, - "##楊": 30409, - "##樹": 30410, - "##橋": 30411, - "##歌": 30412, - "##止": 30413, - "##正": 30414, - "##武": 30415, - "##比": 30416, - "##氏": 30417, - "##民": 30418, - "##水": 
30419, - "##氵": 30420, - "##氷": 30421, - "##永": 30422, - "##江": 30423, - "##沢": 30424, - "##河": 30425, - "##治": 30426, - "##法": 30427, - "##海": 30428, - "##清": 30429, - "##漢": 30430, - "##瀬": 30431, - "##火": 30432, - "##版": 30433, - "##犬": 30434, - "##王": 30435, - "##生": 30436, - "##田": 30437, - "##男": 30438, - "##疒": 30439, - "##発": 30440, - "##白": 30441, - "##的": 30442, - "##皇": 30443, - "##目": 30444, - "##相": 30445, - "##省": 30446, - "##真": 30447, - "##石": 30448, - "##示": 30449, - "##社": 30450, - "##神": 30451, - "##福": 30452, - "##禾": 30453, - "##秀": 30454, - "##秋": 30455, - "##空": 30456, - "##立": 30457, - "##章": 30458, - "##竹": 30459, - "##糹": 30460, - "##美": 30461, - "##義": 30462, - "##耳": 30463, - "##良": 30464, - "##艹": 30465, - "##花": 30466, - "##英": 30467, - "##華": 30468, - "##葉": 30469, - "##藤": 30470, - "##行": 30471, - "##街": 30472, - "##西": 30473, - "##見": 30474, - "##訁": 30475, - "##語": 30476, - "##谷": 30477, - "##貝": 30478, - "##貴": 30479, - "##車": 30480, - "##軍": 30481, - "##辶": 30482, - "##道": 30483, - "##郎": 30484, - "##郡": 30485, - "##部": 30486, - "##都": 30487, - "##里": 30488, - "##野": 30489, - "##金": 30490, - "##鈴": 30491, - "##镇": 30492, - "##長": 30493, - "##門": 30494, - "##間": 30495, - "##阝": 30496, - "##阿": 30497, - "##陳": 30498, - "##陽": 30499, - "##雄": 30500, - "##青": 30501, - "##面": 30502, - "##風": 30503, - "##食": 30504, - "##香": 30505, - "##馬": 30506, - "##高": 30507, - "##龍": 30508, - "##龸": 30509, - "##fi": 30510, - "##fl": 30511, - "##!": 30512, - "##(": 30513, - "##)": 30514, - "##,": 30515, - "##-": 30516, - "##.": 30517, - "##/": 30518, - "##:": 30519, - "##?": 30520, - "##~": 30521, - "bowang": 30522, - "georgiosmastrapas": 30523, - "jackminong": 30524, - "alaeddineabdessalem": 30525, - "isabellemohr": 30526, - "michaelguenther": 30527 - } - } -} \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer_config.json b/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer_config.json deleted file mode 100644 index 91f9e357cc..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/tokenizer/tokenizer_config.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "clean_up_tokenization_spaces": true, - "cls_token": "[CLS]", - "do_basic_tokenize": true, - "do_lower_case": true, - "mask_token": "[MASK]", - "model_max_length": 2147483648, - "never_split": null, - "pad_token": "[PAD]", - "sep_token": "[SEP]", - "strip_accents": null, - "tokenize_chinese_chars": true, - "tokenizer_class": "BertTokenizer", - "unk_token": "[UNK]" -} diff --git a/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png deleted file mode 100644 index 719122b284..0000000000 Binary files a/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png deleted file mode 100644 index 3f652d96e1..0000000000 Binary files a/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/leptonai/leptonai.py b/api/core/model_runtime/model_providers/leptonai/leptonai.py deleted file mode 100644 index 34a55ff192..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/leptonai.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from 
diff --git a/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png deleted file mode 100644 index 719122b284..0000000000 Binary files a/api/core/model_runtime/model_providers/leptonai/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png deleted file mode 100644 index 3f652d96e1..0000000000 Binary files a/api/core/model_runtime/model_providers/leptonai/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/leptonai/leptonai.py b/api/core/model_runtime/model_providers/leptonai/leptonai.py deleted file mode 100644 index 34a55ff192..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/leptonai.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class LeptonAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="llama2-7b", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/leptonai/leptonai.yaml b/api/core/model_runtime/model_providers/leptonai/leptonai.yaml deleted file mode 100644 index a246ff5356..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/leptonai.yaml +++ /dev/null @@ -1,29 +0,0 @@ -provider: leptonai -label: - zh_Hans: Lepton AI - en_US: Lepton AI -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#F5F5F4" -help: - title: - en_US: Get your API Key from Lepton AI - zh_Hans: 从 Lepton AI 获取 API Key - url: - en_US: https://dashboard.lepton.ai -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/leptonai/llm/_position.yaml b/api/core/model_runtime/model_providers/leptonai/llm/_position.yaml deleted file mode 100644 index a85e8e65ba..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/_position.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- gemma-7b -- mistral-7b -- mixtral-8x7b -- llama2-7b -- llama2-13b -- llama3-70b diff --git a/api/core/model_runtime/model_providers/leptonai/llm/gemma-7b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/gemma-7b.yaml deleted file mode 100644 index 2d69067a23..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/gemma-7b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: gemma-7b -label: - zh_Hans: gemma-7b - en_US: gemma-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/leptonai/llm/llama2-13b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/llama2-13b.yaml deleted file mode 100644 index 307f1ea88f..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/llama2-13b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: llama2-13b -label: - zh_Hans: llama2-13b - en_US: llama2-13b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git 
a/api/core/model_runtime/model_providers/leptonai/llm/llama2-7b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/llama2-7b.yaml deleted file mode 100644 index bd471e59cd..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/llama2-7b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: llama2-7b -label: - zh_Hans: llama2-7b - en_US: llama2-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/leptonai/llm/llama3-70b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/llama3-70b.yaml deleted file mode 100644 index 9c20eb6cdb..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/llama3-70b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: llama3-70b -label: - zh_Hans: llama3-70b - en_US: llama3-70b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/leptonai/llm/llm.py b/api/core/model_runtime/model_providers/leptonai/llm/llm.py deleted file mode 100644 index 3d69417e45..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/llm.py +++ /dev/null @@ -1,40 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class LeptonAILargeLanguageModel(OAIAPICompatLargeLanguageModel): - MODEL_PREFIX_MAP = { - "llama2-7b": "llama2-7b", - "gemma-7b": "gemma-7b", - "mistral-7b": "mistral-7b", - "mixtral-8x7b": "mixtral-8x7b", - "llama3-70b": "llama3-70b", - "llama2-13b": "llama2-13b", - } - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials, model) - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials, model) - super().validate_credentials(model, credentials) - - @classmethod - def _add_custom_parameters(cls, credentials: dict, model: str) -> None: - credentials["mode"] = "chat" - credentials["endpoint_url"] = f"https://{cls.MODEL_PREFIX_MAP[model]}.lepton.run/api/v1" diff --git a/api/core/model_runtime/model_providers/leptonai/llm/mistral-7b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/mistral-7b.yaml deleted file mode 100644 index f2b46ff917..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/mistral-7b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: mistral-7b -label: - zh_Hans: mistral-7b - en_US: mistral-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 
-parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/leptonai/llm/mixtral-8x7b.yaml b/api/core/model_runtime/model_providers/leptonai/llm/mixtral-8x7b.yaml deleted file mode 100644 index de788ac256..0000000000 --- a/api/core/model_runtime/model_providers/leptonai/llm/mixtral-8x7b.yaml +++ /dev/null @@ -1,20 +0,0 @@ -model: mixtral-8x7b -label: - zh_Hans: mixtral-8x7b - en_US: mixtral-8x7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/localai/__init__.py b/api/core/model_runtime/model_providers/localai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/localai/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/localai/_assets/icon_l_en.svg deleted file mode 100644 index 251a37fdc7..0000000000 --- a/api/core/model_runtime/model_providers/localai/_assets/icon_l_en.svg +++ /dev/null @@ -1,22 +0,0 @@ [22 lines of SVG markup omitted] diff --git a/api/core/model_runtime/model_providers/localai/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/localai/_assets/icon_s_en.svg deleted file mode 100644 index 9dc6e6276e..0000000000 --- a/api/core/model_runtime/model_providers/localai/_assets/icon_s_en.svg +++ /dev/null @@ -1,15 +0,0 @@ [15 lines of SVG markup omitted] diff --git a/api/core/model_runtime/model_providers/localai/llm/__init__.py b/api/core/model_runtime/model_providers/localai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000
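
The deleted LeptonAI integration above and the LocalAI integration whose files follow wrap OpenAI-compatible endpoints in the same way: a thin adapter points a standard client at a provider-specific base URL (LeptonAI builds one URL per model from MODEL_PREFIX_MAP, while LocalAI appends /v1 to a user-supplied server_url). A minimal sketch of that pattern, assuming the openai Python SDK; the key, model name, and URL below are placeholders only:

    # Hedged sketch of the OpenAI-compatible adapter pattern used by both
    # deleted providers; every literal below is illustrative, not real.
    from openai import OpenAI

    client = OpenAI(
        api_key="YOUR_API_KEY",  # the deleted LocalAI code passed a dummy key ("1")
        base_url="https://llama3-70b.lepton.run/api/v1",  # LeptonAI's per-model endpoint shape
    )
    response = client.chat.completions.create(
        model="llama3-70b",
        messages=[{"role": "user", "content": "ping"}],
        max_tokens=10,
    )
    print(response.choices[0].message.content)
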
diff --git a/api/core/model_runtime/model_providers/localai/llm/llm.py b/api/core/model_runtime/model_providers/localai/llm/llm.py deleted file mode 100644 index e7295355f6..0000000000 --- a/api/core/model_runtime/model_providers/localai/llm/llm.py +++ /dev/null @@ -1,674 +0,0 @@ -from collections.abc import Generator -from typing import cast - -from httpx import Timeout -from openai import ( - APIConnectionError, - APITimeoutError, - AuthenticationError, - ConflictError, - InternalServerError, - NotFoundError, - OpenAI, - PermissionDeniedError, - RateLimitError, - Stream, - UnprocessableEntityError, -) -from openai.types.chat import ChatCompletion, ChatCompletionChunk -from openai.types.chat.chat_completion_message import FunctionCall -from openai.types.completion import Completion -from yarl import URL - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.utils import helper - - -class LocalAILanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - # tools are not supported yet - return self._num_tokens_from_messages(prompt_messages, tools=tools) - - def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for a LocalAI-hosted model; LocalAI does not expose a tokenizer, - so the count is approximated locally - """ - - def tokens(text: str): - """ - We could not determine which tokenizer to use, because the model is customized. - So we use the gpt2 tokenizer to calculate the num tokens for convenience. - """ - return self._get_num_tokens_by_gpt2(text) - - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - if key == "function_call": - for t_key, t_value in value.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - else: - num_tokens += tokens(str(value)) - - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens
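
Because LocalAI hosts arbitrary user-deployed models, _num_tokens_from_messages above cannot know the real tokenizer and estimates every count through _get_num_tokens_by_gpt2. One way to reproduce that approximation, assuming the transformers package (the in-repo helper may load its GPT-2 tokenizer differently):

    # Hedged sketch of the GPT-2 fallback used for token estimates.
    from transformers import GPT2TokenizerFast

    gpt2 = GPT2TokenizerFast.from_pretrained("gpt2")

    def estimate_tokens(text: str) -> int:
        # Only an approximation: a custom LocalAI model's tokenizer can differ.
        return len(gpt2.encode(text))

    print(estimate_tokens("ping"))
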
field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += tokens(enum_field) - else: - num_tokens += tokens(field_key) - num_tokens += tokens(str(field_value)) - if "required" in parameters: - num_tokens += tokens("required") - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += tokens(required_field) - - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={ - "max_tokens": 10, - }, - stop=[], - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(f"Invalid credentials {str(ex)}") - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - completion_model = None - if credentials["completion_type"] == "chat_completion": - completion_model = LLMMode.CHAT.value - elif credentials["completion_type"] == "completion": - completion_model = LLMMode.COMPLETION.value - else: - raise ValueError(f"Unknown completion type {credentials['completion_type']}") - - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - max=2048, - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ] - - model_properties = ( - { - ModelPropertyKey.MODE: completion_model, - } - if completion_model - else {} - ) - - model_properties[ModelPropertyKey.CONTEXT_SIZE] = int(credentials.get("context_size", "2048")) - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - model_properties=model_properties, - parameter_rules=rules, - ) - - return entity - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - kwargs = self._to_client_kwargs(credentials) - # init model client - client = OpenAI(**kwargs) - - model_name = model - completion_type = credentials["completion_type"] - - extra_model_kwargs = { - "timeout": 60, - } - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - if tools and len(tools) > 0: - extra_model_kwargs["functions"] = [helper.dump_model(tool) for tool in tools] - - if completion_type == "chat_completion": - result = client.chat.completions.create( - messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], - model=model_name, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - elif completion_type == "completion": - result = client.completions.create( - prompt=self._convert_prompt_message_to_completion_prompts(prompt_messages), - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - else: - raise ValueError(f"Unknown completion type {completion_type}") - - if stream: - if 
completion_type == "completion": - return self._handle_completion_generate_stream_response( - model=model, credentials=credentials, response=result, tools=tools, prompt_messages=prompt_messages - ) - return self._handle_chat_generate_stream_response( - model=model, credentials=credentials, response=result, tools=tools, prompt_messages=prompt_messages - ) - - if completion_type == "completion": - return self._handle_completion_generate_response( - model=model, credentials=credentials, response=result, prompt_messages=prompt_messages - ) - return self._handle_chat_generate_response( - model=model, credentials=credentials, response=result, tools=tools, prompt_messages=prompt_messages - ) - - def _to_client_kwargs(self, credentials: dict) -> dict: - """ - Convert invoke kwargs to client kwargs - - :param credentials: credentials dict - :return: client kwargs - """ - if not credentials["server_url"].endswith("/"): - credentials["server_url"] += "/" - - client_kwargs = { - "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), - "api_key": "1", - "base_url": str(URL(credentials["server_url"]) / "v1"), - } - - return client_kwargs - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for OpenAI Compatibility API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - raise ValueError("User message content must be str") - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls and len(message.tool_calls) > 0: - message_dict["function_call"] = { - "name": message.tool_calls[0].function.name, - "arguments": message.tool_calls[0].function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - # copy from core/model_runtime/model_providers/anthropic/llm/llm.py - message = cast(ToolPromptMessage, message) - message_dict = { - "role": "user", - "content": [{"type": "tool_result", "tool_use_id": message.tool_call_id, "content": message.content}], - } - else: - raise ValueError(f"Unknown message type {type(message)}") - - return message_dict - - def _convert_prompt_message_to_completion_prompts(self, messages: list[PromptMessage]) -> str: - """ - Convert PromptMessage to completion prompts - """ - prompts = "" - for message in messages: - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - prompts += f"{message.content}\n" - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - prompts += f"{message.content}\n" - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - prompts += f"{message.content}\n" - else: - raise ValueError(f"Unknown message type {type(message)}") - - return prompts - - def _handle_completion_generate_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Completion, - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - 
:return: llm response - """ - if len(response.choices) == 0: - raise InvokeServerUnavailableError("Empty response") - - assistant_message = response.choices[0].text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message, tool_calls=[]) - - prompt_tokens = self._get_num_tokens_by_gpt2( - self._convert_prompt_message_to_completion_prompts(prompt_messages) - ) - completion_tokens = self._num_tokens_from_messages(messages=[assistant_prompt_message], tools=[]) - - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=response.system_fingerprint, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _handle_chat_generate_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: ChatCompletion, - tools: list[PromptMessageTool], - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response - """ - if len(response.choices) == 0: - raise InvokeServerUnavailableError("Empty response") - - assistant_message = response.choices[0].message - - # convert function call to tool call - function_calls = assistant_message.function_call - tool_calls = self._extract_response_tool_calls([function_calls] if function_calls else []) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[assistant_prompt_message], tools=tools) - - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=response.system_fingerprint, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _handle_completion_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Stream[Completion], - tools: list[PromptMessageTool], - ) -> Generator: - full_response = "" - - for chunk in response: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[]) - - if delta.finish_reason is not None: - # temp_assistant_prompt_message is used to calculate usage - temp_assistant_prompt_message = AssistantPromptMessage(content=full_response, tool_calls=[]) - - prompt_tokens = self._get_num_tokens_by_gpt2( - self._convert_prompt_message_to_completion_prompts(prompt_messages) - ) - - completion_tokens = self._num_tokens_from_messages(messages=[temp_assistant_prompt_message], tools=[]) - - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - 
delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, - message=assistant_prompt_message, - ), - ) - - full_response += delta.text - - def _handle_chat_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Stream[ChatCompletionChunk], - tools: list[PromptMessageTool], - ) -> Generator: - full_response = "" - - for chunk in response: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""): - continue - - # check if there is a tool call in the response - function_calls = None - if delta.delta.function_call: - function_calls = [delta.delta.function_call] - - assistant_message_tool_calls = self._extract_response_tool_calls(function_calls or []) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta.delta.content or "", tool_calls=assistant_message_tool_calls - ) - - if delta.finish_reason is not None: - # temp_assistant_prompt_message is used to calculate usage - temp_assistant_prompt_message = AssistantPromptMessage( - content=full_response, tool_calls=assistant_message_tool_calls - ) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[temp_assistant_prompt_message], tools=[]) - - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - full_response += delta.delta.content - - def _extract_response_tool_calls( - self, response_function_calls: list[FunctionCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_function_calls: function calls from the response - :return: list of tool calls - """ - tool_calls = [] - if response_function_calls: - for response_tool_call in response_function_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.name, arguments=response_tool_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall(id="0", type="function", function=function) - tool_calls.append(tool_call) - - return tool_calls - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller.
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - APIConnectionError, - APITimeoutError, - ], - InvokeServerUnavailableError: [ - InternalServerError, - ConflictError, - NotFoundError, - UnprocessableEntityError, - PermissionDeniedError, - ], - InvokeRateLimitError: [RateLimitError], - InvokeAuthorizationError: [AuthenticationError], - InvokeBadRequestError: [ValueError], - } diff --git a/api/core/model_runtime/model_providers/localai/localai.py b/api/core/model_runtime/model_providers/localai/localai.py deleted file mode 100644 index 4ff898052b..0000000000 --- a/api/core/model_runtime/model_providers/localai/localai.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class LocalAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/localai/localai.yaml b/api/core/model_runtime/model_providers/localai/localai.yaml deleted file mode 100644 index 864dd7a30c..0000000000 --- a/api/core/model_runtime/model_providers/localai/localai.yaml +++ /dev/null @@ -1,72 +0,0 @@ -provider: localai -label: - en_US: LocalAI -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#F3F4F6" -help: - title: - en_US: How to deploy LocalAI - zh_Hans: 如何部署 LocalAI - url: - en_US: https://github.com/go-skynet/LocalAI -supported_model_types: - - llm - - text-embedding - - rerank - - speech2text -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: completion_type - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion type - type: select - required: false - default: chat_completion - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion type - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat_completion - label: - en_US: ChatCompletion - zh_Hans: 对话 - - variable: server_url - label: - zh_Hans: 服务器URL - en_US: Server url - type: text-input - required: true - placeholder: - zh_Hans: 在此输入LocalAI的服务器地址,如 http://192.168.1.100:8080 - en_US: Enter the url of your LocalAI, e.g. 
http://192.168.1.100:8080 - - variable: context_size - show_on: - - variable: __model_type - value: llm - label: - zh_Hans: 上下文大小 - en_US: Context size - placeholder: - zh_Hans: 输入上下文大小 - en_US: Enter context size - required: false - type: text-input diff --git a/api/core/model_runtime/model_providers/localai/rerank/__init__.py b/api/core/model_runtime/model_providers/localai/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/localai/rerank/rerank.py b/api/core/model_runtime/model_providers/localai/rerank/rerank.py deleted file mode 100644 index 2b0f53bc19..0000000000 --- a/api/core/model_runtime/model_providers/localai/rerank/rerank.py +++ /dev/null @@ -1,134 +0,0 @@ -from json import dumps -from typing import Optional - -import httpx -from requests import post -from yarl import URL - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class LocalaiRerankModel(RerankModel): - """ - LocalAI rerank model API is compatible with Jina rerank model API. So just copy the JinaRerankModel class code here. - """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n documents to return - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - server_url = credentials["server_url"] - model_name = model - - if not server_url: - raise CredentialsValidateFailedError("server_url is required") - if not model_name: - raise CredentialsValidateFailedError("model_name is required") - - url = server_url - headers = {"Authorization": f"Bearer {credentials.get('api_key')}", "Content-Type": "application/json"} - - data = {"model": model_name, "query": query, "documents": docs, "top_n": top_n} - - try: - response = post(str(URL(url) / "rerank"), headers=headers, data=dumps(data), timeout=10) - response.raise_for_status() - results = response.json() - - rerank_documents = [] - for result in results["results"]: - rerank_document = RerankDocument( - index=result["index"], - text=result["document"]["text"], - score=result["relevance_score"], - ) - if score_threshold is None or result["relevance_score"] >= score_threshold: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - except httpx.HTTPStatusError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - 
credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - """ - return { - InvokeConnectionError: [httpx.ConnectError], - InvokeServerUnavailableError: [httpx.RemoteProtocolError], - InvokeRateLimitError: [], - InvokeAuthorizationError: [httpx.HTTPStatusError], - InvokeBadRequestError: [httpx.RequestError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - Generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.RERANK, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={}, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/localai/speech2text/__init__.py b/api/core/model_runtime/model_providers/localai/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py b/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py deleted file mode 100644 index 4b9d0f5bfe..0000000000 --- a/api/core/model_runtime/model_providers/localai/speech2text/speech2text.py +++ /dev/null @@ -1,89 +0,0 @@ -from typing import IO, Optional - -from requests import Request, Session -from yarl import URL - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel - - -class LocalAISpeech2text(Speech2TextModel): - """ - Model class for the LocalAI speech-to-text model.
- """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - - url = str(URL(credentials["server_url"]) / "v1/audio/transcriptions") - data = {"model": model} - files = {"file": file} - - session = Session() - request = Request("POST", url, data=data, files=files) - prepared_request = session.prepare_request(request) - response = session.send(prepared_request) - - if "error" in response.json(): - raise InvokeServerUnavailableError("Empty response") - - return response.json()["text"] - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - audio_file_path = self._get_demo_file_path() - - with open(audio_file_path, "rb") as audio_file: - self._invoke(model, credentials, audio_file) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.SPEECH2TEXT, - model_properties={}, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/localai/text_embedding/__init__.py b/api/core/model_runtime/model_providers/localai/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/minimax/__init__.py b/api/core/model_runtime/model_providers/minimax/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/minimax/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/minimax/_assets/icon_l_en.png deleted file mode 100644 index 5066b525f9..0000000000 Binary files a/api/core/model_runtime/model_providers/minimax/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/minimax/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/minimax/_assets/icon_s_en.png deleted file mode 100644 index 30c71e9bd3..0000000000 Binary files a/api/core/model_runtime/model_providers/minimax/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/minimax/llm/__init__.py b/api/core/model_runtime/model_providers/minimax/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab5-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab5-chat.yaml deleted file mode 100644 index 2c1f79e2b7..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab5-chat.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: abab5-chat -label: - en_US: Abab5-Chat -model_type: llm -features: - - agent-thought 
-model_properties: - mode: chat - context_size: 6144 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 6144 - min: 1 - max: 6144 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.015' - output: '0.015' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab5.5-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab5.5-chat.yaml deleted file mode 100644 index 6d29be0d0e..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab5.5-chat.yaml +++ /dev/null @@ -1,53 +0,0 @@ -model: abab5.5-chat -label: - en_US: Abab5.5-Chat -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.01 - max: 1 - default: 0.9 - - name: top_p - use_template: top_p - min: 0.01 - max: 1 - default: 0.95 - - name: max_tokens - use_template: max_tokens - required: true - default: 6144 - min: 1 - max: 16384 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: plugin_web_search - required: false - default: false - type: boolean - label: - en_US: Enable Web Search - zh_Hans: 开启网页搜索 -pricing: - input: '0.015' - output: '0.015' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab5.5s-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab5.5s-chat.yaml deleted file mode 100644 index aa42bb5739..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab5.5s-chat.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: abab5.5s-chat -label: - en_US: Abab5.5s-Chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.01 - max: 1 - default: 0.9 - - name: top_p - use_template: top_p - min: 0.01 - max: 1 - default: 0.95 - - name: max_tokens - use_template: max_tokens - required: true - default: 3072 - min: 1 - max: 8192 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. 
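The `pricing` stanzas in these Minimax YAMLs follow what I read as Dify's convention: total cost = tokens × price × `unit`, so `unit: '0.001'` effectively prices per thousand tokens. A quick illustrative check for the 0.015 RMB abab rates (convention as described, not provider-verified):

```python
from decimal import Decimal

# abab5.5-chat: input and output both priced at 0.015 RMB with unit 0.001
price = Decimal("0.015")
unit = Decimal("0.001")

prompt_tokens, completion_tokens = 2000, 500
total = (prompt_tokens + completion_tokens) * price * unit
print(total)  # 0.037500 RMB for 2,500 tokens, i.e. 0.015 RMB per 1K tokens
```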
- - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.005' - output: '0.005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab6-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab6-chat.yaml deleted file mode 100644 index 9188b6b53f..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab6-chat.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: abab6-chat -label: - en_US: Abab6-Chat -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.01 - max: 1 - default: 0.1 - - name: top_p - use_template: top_p - min: 0.01 - max: 1 - default: 0.9 - - name: max_tokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 32768 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.1' - output: '0.1' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab6.5-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab6.5-chat.yaml deleted file mode 100644 index 5d717d5f8c..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab6.5-chat.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: abab6.5-chat -label: - en_US: Abab6.5-Chat -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.01 - max: 1 - default: 0.1 - - name: top_p - use_template: top_p - min: 0.01 - max: 1 - default: 0.95 - - name: max_tokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 8192 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. 
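These `parameter_rules` only declare the user-facing knobs; the deleted Minimax client further down in this diff re-keys the validated values for the HTTP API (`max_tokens` becomes `tokens_to_generate`, for instance). A condensed sketch of that translation, assuming the parameters already passed rule validation:

```python
def to_minimax_kwargs(model_parameters: dict) -> dict:
    """Re-key validated Dify parameters into Minimax request fields (sketch)."""
    extra_kwargs = {}
    # mirrors the type checks in the deleted chat_completion(_pro).py generate()
    if isinstance(model_parameters.get("max_tokens"), int):
        extra_kwargs["tokens_to_generate"] = model_parameters["max_tokens"]
    for key in ("temperature", "top_p"):
        if isinstance(model_parameters.get(key), float):
            extra_kwargs[key] = model_parameters[key]
    if isinstance(model_parameters.get("mask_sensitive_info"), bool):
        extra_kwargs["mask_sensitive_info"] = model_parameters["mask_sensitive_info"]
    if model_parameters.get("plugin_web_search"):
        extra_kwargs["plugins"] = ["plugin_web_search"]
    return extra_kwargs

print(to_minimax_kwargs({"max_tokens": 2048, "temperature": 0.9, "plugin_web_search": True}))
# {'tokens_to_generate': 2048, 'temperature': 0.9, 'plugins': ['plugin_web_search']}
```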
- - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.03' - output: '0.03' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/abab6.5s-chat.yaml b/api/core/model_runtime/model_providers/minimax/llm/abab6.5s-chat.yaml deleted file mode 100644 index 4631fe67e4..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/abab6.5s-chat.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: abab6.5s-chat -label: - en_US: Abab6.5s-Chat -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 245760 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.01 - max: 1 - default: 0.1 - - name: top_p - use_template: top_p - min: 0.01 - max: 1 - default: 0.95 - - name: max_tokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 245760 - - name: mask_sensitive_info - type: boolean - default: true - label: - zh_Hans: 隐私保护 - en_US: Moderate - help: - zh_Hans: 对输出中易涉及隐私问题的文本信息进行打码,目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 - en_US: Mask the sensitive info of the generated content, such as email/domain/link/address/phone/id.. - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.01' - output: '0.01' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/minimax/llm/chat_completion.py b/api/core/model_runtime/model_providers/minimax/llm/chat_completion.py deleted file mode 100644 index 88cc0e8e0f..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/chat_completion.py +++ /dev/null @@ -1,166 +0,0 @@ -from collections.abc import Generator -from json import dumps, loads -from typing import Any, Union - -from requests import Response, post - -from core.model_runtime.model_providers.minimax.llm.errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) -from core.model_runtime.model_providers.minimax.llm.types import MinimaxMessage - - -class MinimaxChatCompletion: - """ - Minimax Chat Completion API - """ - - def generate( - self, - model: str, - api_key: str, - group_id: str, - prompt_messages: list[MinimaxMessage], - model_parameters: dict, - tools: list[dict[str, Any]], - stop: list[str] | None, - stream: bool, - user: str, - ) -> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]: - """ - generate chat completion - """ - if not api_key or not group_id: - raise InvalidAPIKeyError("Invalid API key or group ID") - - url = f"https://api.minimax.chat/v1/text/chatcompletion?GroupId={group_id}" - - extra_kwargs = {} - - if "max_tokens" in model_parameters and type(model_parameters["max_tokens"]) == int: - extra_kwargs["tokens_to_generate"] = model_parameters["max_tokens"] - - if "temperature" in model_parameters and type(model_parameters["temperature"]) == float: - extra_kwargs["temperature"] = model_parameters["temperature"] - - if "top_p" in model_parameters and type(model_parameters["top_p"]) == float: - extra_kwargs["top_p"] = model_parameters["top_p"] - - prompt = "你是一个什么都懂的专家" - - role_meta = {"user_name": "我", "bot_name": "专家"} - - # check if there is a system message - if len(prompt_messages) == 0: - raise BadRequestError("At least one message is required") - - if prompt_messages[0].role == 
MinimaxMessage.Role.SYSTEM.value: - if prompt_messages[0].content: - prompt = prompt_messages[0].content - prompt_messages = prompt_messages[1:] - - # check if there is a user message - if len(prompt_messages) == 0: - raise BadRequestError("At least one user message is required") - - messages = [ - { - "sender_type": message.role, - "text": message.content, - } - for message in prompt_messages - ] - - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - body = { - "model": model, - "messages": messages, - "prompt": prompt, - "role_meta": role_meta, - "stream": stream, - **extra_kwargs, - } - - try: - response = post(url=url, data=dumps(body), headers=headers, stream=stream, timeout=(10, 300)) - except Exception as e: - raise InternalServerError(e) - - if response.status_code != 200: - raise InternalServerError(response.text) - - if stream: - return self._handle_stream_chat_generate_response(response) - return self._handle_chat_generate_response(response) - - def _handle_error(self, code: int, msg: str): - if code in {1000, 1001, 1013, 1027}: - raise InternalServerError(msg) - elif code in {1002, 1039}: - raise RateLimitReachedError(msg) - elif code == 1004: - raise InvalidAuthenticationError(msg) - elif code == 1008: - raise InsufficientAccountBalanceError(msg) - elif code == 2013: - raise BadRequestError(msg) - else: - raise InternalServerError(msg) - - def _handle_chat_generate_response(self, response: Response) -> MinimaxMessage: - """ - handle chat generate response - """ - response = response.json() - if "base_resp" in response and response["base_resp"]["status_code"] != 0: - code = response["base_resp"]["status_code"] - msg = response["base_resp"]["status_msg"] - self._handle_error(code, msg) - - message = MinimaxMessage(content=response["reply"], role=MinimaxMessage.Role.ASSISTANT.value) - message.usage = { - "prompt_tokens": 0, - "completion_tokens": response["usage"]["total_tokens"], - "total_tokens": response["usage"]["total_tokens"], - } - message.stop_reason = response["choices"][0]["finish_reason"] - return message - - def _handle_stream_chat_generate_response(self, response: Response) -> Generator[MinimaxMessage, None, None]: - """ - handle stream chat generate response - """ - for line in response.iter_lines(): - if not line: - continue - line: str = line.decode("utf-8") - if line.startswith("data: "): - line = line[6:].strip() - data = loads(line) - - if "base_resp" in data and data["base_resp"]["status_code"] != 0: - code = data["base_resp"]["status_code"] - msg = data["base_resp"]["status_msg"] - self._handle_error(code, msg) - - if data["reply"]: - total_tokens = data["usage"]["total_tokens"] - message = MinimaxMessage(role=MinimaxMessage.Role.ASSISTANT.value, content="") - message.usage = {"prompt_tokens": 0, "completion_tokens": total_tokens, "total_tokens": total_tokens} - message.stop_reason = data["choices"][0]["finish_reason"] - yield message - return - - choices = data.get("choices", []) - if len(choices) == 0: - continue - - for choice in choices: - message = choice["delta"] - yield MinimaxMessage(content=message, role=MinimaxMessage.Role.ASSISTANT.value) diff --git a/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py b/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py deleted file mode 100644 index 8b8fdbb6bd..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/chat_completion_pro.py +++ /dev/null @@ -1,191 +0,0 @@ -from collections.abc import Generator -from json 
import dumps, loads -from typing import Any, Union - -from requests import Response, post - -from core.model_runtime.model_providers.minimax.llm.errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) -from core.model_runtime.model_providers.minimax.llm.types import MinimaxMessage - - -class MinimaxChatCompletionPro: - """ - Minimax Chat Completion Pro API, supports function calling - however, we do not have enough time and energy to implement it, but the parameters are reserved - """ - - def generate( - self, - model: str, - api_key: str, - group_id: str, - prompt_messages: list[MinimaxMessage], - model_parameters: dict, - tools: list[dict[str, Any]], - stop: list[str] | None, - stream: bool, - user: str, - ) -> Union[MinimaxMessage, Generator[MinimaxMessage, None, None]]: - """ - generate chat completion - """ - if not api_key or not group_id: - raise InvalidAPIKeyError("Invalid API key or group ID") - - url = f"https://api.minimax.chat/v1/text/chatcompletion_pro?GroupId={group_id}" - - extra_kwargs = {} - - if "max_tokens" in model_parameters and type(model_parameters["max_tokens"]) == int: - extra_kwargs["tokens_to_generate"] = model_parameters["max_tokens"] - - if "temperature" in model_parameters and type(model_parameters["temperature"]) == float: - extra_kwargs["temperature"] = model_parameters["temperature"] - - if "top_p" in model_parameters and type(model_parameters["top_p"]) == float: - extra_kwargs["top_p"] = model_parameters["top_p"] - - if "mask_sensitive_info" in model_parameters and type(model_parameters["mask_sensitive_info"]) == bool: - extra_kwargs["mask_sensitive_info"] = model_parameters["mask_sensitive_info"] - - if model_parameters.get("plugin_web_search"): - extra_kwargs["plugins"] = ["plugin_web_search"] - - bot_setting = {"bot_name": "专家", "content": "你是一个什么都懂的专家"} - - reply_constraints = {"sender_type": "BOT", "sender_name": "专家"} - - # check if there is a system message - if len(prompt_messages) == 0: - raise BadRequestError("At least one message is required") - - if prompt_messages[0].role == MinimaxMessage.Role.SYSTEM.value: - if prompt_messages[0].content: - bot_setting["content"] = prompt_messages[0].content - prompt_messages = prompt_messages[1:] - - # check if there is a user message - if len(prompt_messages) == 0: - raise BadRequestError("At least one user message is required") - - messages = [message.to_dict() for message in prompt_messages] - - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - body = { - "model": model, - "messages": messages, - "bot_setting": [bot_setting], - "reply_constraints": reply_constraints, - "stream": stream, - **extra_kwargs, - } - - if tools: - body["functions"] = tools - body["function_call"] = {"type": "auto"} - - try: - response = post(url=url, data=dumps(body), headers=headers, stream=stream, timeout=(10, 300)) - except Exception as e: - raise InternalServerError(e) - - if response.status_code != 200: - raise InternalServerError(response.text) - - if stream: - return self._handle_stream_chat_generate_response(response) - return self._handle_chat_generate_response(response) - - def _handle_error(self, code: int, msg: str): - if code in {1000, 1001, 1013, 1027}: - raise InternalServerError(msg) - elif code in {1002, 1039}: - raise RateLimitReachedError(msg) - elif code == 1004: - raise InvalidAuthenticationError(msg) - elif code == 1008: - raise 
InsufficientAccountBalanceError(msg) - elif code == 2013: - raise BadRequestError(msg) - else: - raise InternalServerError(msg) - - def _handle_chat_generate_response(self, response: Response) -> MinimaxMessage: - """ - handle chat generate response - """ - response = response.json() - if "base_resp" in response and response["base_resp"]["status_code"] != 0: - code = response["base_resp"]["status_code"] - msg = response["base_resp"]["status_msg"] - self._handle_error(code, msg) - - message = MinimaxMessage(content=response["reply"], role=MinimaxMessage.Role.ASSISTANT.value) - message.usage = { - "prompt_tokens": 0, - "completion_tokens": response["usage"]["total_tokens"], - "total_tokens": response["usage"]["total_tokens"], - } - message.stop_reason = response["choices"][0]["finish_reason"] - return message - - def _handle_stream_chat_generate_response(self, response: Response) -> Generator[MinimaxMessage, None, None]: - """ - handle stream chat generate response - """ - for line in response.iter_lines(): - if not line: - continue - line: str = line.decode("utf-8") - if line.startswith("data: "): - line = line[6:].strip() - data = loads(line) - - if "base_resp" in data and data["base_resp"]["status_code"] != 0: - code = data["base_resp"]["status_code"] - msg = data["base_resp"]["status_msg"] - self._handle_error(code, msg) - - # final chunk - if data["reply"] or data.get("usage"): - total_tokens = data["usage"]["total_tokens"] - minimax_message = MinimaxMessage(role=MinimaxMessage.Role.ASSISTANT.value, content="") - minimax_message.usage = { - "prompt_tokens": 0, - "completion_tokens": total_tokens, - "total_tokens": total_tokens, - } - minimax_message.stop_reason = data["choices"][0]["finish_reason"] - - choices = data.get("choices", []) - if len(choices) > 0: - for choice in choices: - message = choice["messages"][0] - # append function_call message - if "function_call" in message: - function_call_message = MinimaxMessage(content="", role=MinimaxMessage.Role.ASSISTANT.value) - function_call_message.function_call = message["function_call"] - yield function_call_message - - yield minimax_message - return - - # partial chunk - choices = data.get("choices", []) - if len(choices) == 0: - continue - - for choice in choices: - message = choice["messages"][0] - # append text message - if "text" in message: - minimax_message = MinimaxMessage(content=message["text"], role=MinimaxMessage.Role.ASSISTANT.value) - yield minimax_message diff --git a/api/core/model_runtime/model_providers/minimax/llm/errors.py b/api/core/model_runtime/model_providers/minimax/llm/errors.py deleted file mode 100644 index 309b5cf413..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/errors.py +++ /dev/null @@ -1,22 +0,0 @@ -class InvalidAuthenticationError(Exception): - pass - - -class InvalidAPIKeyError(Exception): - pass - - -class RateLimitReachedError(Exception): - pass - - -class InsufficientAccountBalanceError(Exception): - pass - - -class InternalServerError(Exception): - pass - - -class BadRequestError(Exception): - pass diff --git a/api/core/model_runtime/model_providers/minimax/llm/llm.py b/api/core/model_runtime/model_providers/minimax/llm/llm.py deleted file mode 100644 index 4250c40cfb..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/llm.py +++ /dev/null @@ -1,271 +0,0 @@ -from collections.abc import Generator - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - 
AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.minimax.llm.chat_completion import MinimaxChatCompletion -from core.model_runtime.model_providers.minimax.llm.chat_completion_pro import MinimaxChatCompletionPro -from core.model_runtime.model_providers.minimax.llm.errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) -from core.model_runtime.model_providers.minimax.llm.types import MinimaxMessage - - -class MinimaxLargeLanguageModel(LargeLanguageModel): - model_apis = { - "abab6.5s-chat": MinimaxChatCompletionPro, - "abab6.5-chat": MinimaxChatCompletionPro, - "abab6-chat": MinimaxChatCompletionPro, - "abab5.5s-chat": MinimaxChatCompletionPro, - "abab5.5-chat": MinimaxChatCompletionPro, - "abab5-chat": MinimaxChatCompletion, - } - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate credentials for Minimax model - """ - if model not in self.model_apis: - raise CredentialsValidateFailedError(f"Invalid model: {model}") - - if not credentials.get("minimax_api_key"): - raise CredentialsValidateFailedError("Invalid API key") - - if not credentials.get("minimax_group_id"): - raise CredentialsValidateFailedError("Invalid group ID") - - # ping - instance = MinimaxChatCompletionPro() - try: - instance.generate( - model=model, - api_key=credentials["minimax_api_key"], - group_id=credentials["minimax_group_id"], - prompt_messages=[MinimaxMessage(content="ping", role="USER")], - model_parameters={}, - tools=[], - stop=[], - stream=False, - user="", - ) - except (InvalidAuthenticationError, InsufficientAccountBalanceError) as e: - raise CredentialsValidateFailedError(f"Invalid API key: {e}") - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - return self._num_tokens_from_messages(prompt_messages, tools) - - def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for Minimax models - - Unlike ChatGLM, Minimax uses a special prompt structure, and there is no proper way - to calculate the num tokens, so we use str() to convert the prompt to a string - - Minimax does not provide its own tokenizer for the abab5.5 and abab5 models, - so we use the gpt2 tokenizer instead - """ - messages_dict = [self._convert_prompt_message_to_minimax_message(m).to_dict() for m in messages] - return self._get_num_tokens_by_gpt2(str(messages_dict)) - - def _generate( - self, - model: str, - credentials: dict,
- prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - """ - Annotate the client as MinimaxChatCompletionPro; MinimaxChatCompletion exposes the same interface - """ - client: MinimaxChatCompletionPro = self.model_apis[model]() - - if tools: - tools = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} for tool in tools - ] - - response = client.generate( - model=model, - api_key=credentials["minimax_api_key"], - group_id=credentials["minimax_group_id"], - prompt_messages=[self._convert_prompt_message_to_minimax_message(message) for message in prompt_messages], - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - if stream: - return self._handle_chat_generate_stream_response( - model=model, prompt_messages=prompt_messages, credentials=credentials, response=response - ) - return self._handle_chat_generate_response( - model=model, prompt_messages=prompt_messages, credentials=credentials, response=response - ) - - def _convert_prompt_message_to_minimax_message(self, prompt_message: PromptMessage) -> MinimaxMessage: - """ - Convert PromptMessage to MinimaxMessage so that we can use the MinimaxChatCompletionPro interface - """ - if isinstance(prompt_message, SystemPromptMessage): - return MinimaxMessage(role=MinimaxMessage.Role.SYSTEM.value, content=prompt_message.content) - elif isinstance(prompt_message, UserPromptMessage): - return MinimaxMessage(role=MinimaxMessage.Role.USER.value, content=prompt_message.content) - elif isinstance(prompt_message, AssistantPromptMessage): - if prompt_message.tool_calls: - message = MinimaxMessage(role=MinimaxMessage.Role.ASSISTANT.value, content="") - message.function_call = { - "name": prompt_message.tool_calls[0].function.name, - "arguments": prompt_message.tool_calls[0].function.arguments, - } - return message - return MinimaxMessage(role=MinimaxMessage.Role.ASSISTANT.value, content=prompt_message.content) - elif isinstance(prompt_message, ToolPromptMessage): - return MinimaxMessage(role=MinimaxMessage.Role.FUNCTION.value, content=prompt_message.content) - else: - raise NotImplementedError(f"Prompt message type {type(prompt_message)} is not supported") - - def _handle_chat_generate_response( - self, model: str, prompt_messages: list[PromptMessage], credentials: dict, response: MinimaxMessage - ) -> LLMResult: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=response.usage["prompt_tokens"], - completion_tokens=response.usage["completion_tokens"], - ) - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage( - content=response.content, - tool_calls=[], - ), - usage=usage, - ) - - def _handle_chat_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Generator[MinimaxMessage, None, None], - ) -> Generator[LLMResultChunk, None, None]: - for message in response: - if message.usage: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=message.usage["prompt_tokens"], - completion_tokens=message.usage["completion_tokens"], - ) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content,
tool_calls=[]), - usage=usage, - finish_reason=message.stop_reason or None, - ), - ) - elif message.function_call: - if "name" not in message.function_call or "arguments" not in message.function_call: - continue - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage( - content="", - tool_calls=[ - AssistantPromptMessage.ToolCall( - id="", - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=message.function_call["name"], arguments=message.function_call["arguments"] - ), - ) - ], - ), - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content, tool_calls=[]), - finish_reason=message.stop_reason or None, - ), - ) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } diff --git a/api/core/model_runtime/model_providers/minimax/llm/types.py b/api/core/model_runtime/model_providers/minimax/llm/types.py deleted file mode 100644 index 88ebe5e2e0..0000000000 --- a/api/core/model_runtime/model_providers/minimax/llm/types.py +++ /dev/null @@ -1,30 +0,0 @@ -from enum import Enum -from typing import Any - - -class MinimaxMessage: - class Role(Enum): - USER = "USER" - ASSISTANT = "BOT" - SYSTEM = "SYSTEM" - FUNCTION = "FUNCTION" - - role: str = Role.USER.value - content: str - usage: dict[str, int] = None - stop_reason: str = "" - function_call: dict[str, Any] = None - - def to_dict(self) -> dict[str, Any]: - if self.function_call and self.role == MinimaxMessage.Role.ASSISTANT.value: - return {"sender_type": "BOT", "sender_name": "专家", "text": "", "function_call": self.function_call} - - return { - "sender_type": self.role, - "sender_name": "我" if self.role == "USER" else "专家", - "text": self.content, - } - - def __init__(self, content: str, role: str = "USER") -> None: - self.content = content - self.role = role diff --git a/api/core/model_runtime/model_providers/minimax/minimax.py b/api/core/model_runtime/model_providers/minimax/minimax.py deleted file mode 100644 index 5a761903a1..0000000000 --- a/api/core/model_runtime/model_providers/minimax/minimax.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class MinimaxProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
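The `_invoke_error_mapping` property, which recurs across these deleted providers, is consumed by the shared runtime: provider-specific exceptions are caught and re-raised as the unified `InvokeError` subclasses the caller handles. Roughly how such a table gets applied; the `transform_invoke_error` helper is an illustration, not the runtime's actual code:

```python
from core.model_runtime.errors.invoke import InvokeError

def transform_invoke_error(
    error: Exception, mapping: dict[type[InvokeError], list[type[Exception]]]
) -> Exception:
    """Translate a provider-specific exception via an _invoke_error_mapping table."""
    for unified_error, provider_errors in mapping.items():
        if any(isinstance(error, provider_error) for provider_error in provider_errors):
            return unified_error(str(error))
    return error  # unmapped exceptions pass through unchanged

# e.g. Minimax's RateLimitReachedError would surface to callers as InvokeRateLimitError
```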
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `abab5.5-chat` model for validate, - model_instance.validate_credentials(model="abab5.5-chat", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise CredentialsValidateFailedError(f"{ex}") diff --git a/api/core/model_runtime/model_providers/minimax/minimax.yaml b/api/core/model_runtime/model_providers/minimax/minimax.yaml deleted file mode 100644 index 0a97ff9bb9..0000000000 --- a/api/core/model_runtime/model_providers/minimax/minimax.yaml +++ /dev/null @@ -1,37 +0,0 @@ -provider: minimax -label: - en_US: Minimax -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#FFEFEF" -help: - title: - en_US: Get your API Key from Minimax - zh_Hans: 从 Minimax 获取您的 API Key - url: - en_US: https://api.minimax.chat/user-center/basic-information/interface-key -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: minimax_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: minimax_group_id - label: - en_US: Group ID - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的 Group ID - en_US: Enter your group ID diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/__init__.py b/api/core/model_runtime/model_providers/minimax/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/embo-01.yaml b/api/core/model_runtime/model_providers/minimax/text_embedding/embo-01.yaml deleted file mode 100644 index 33546eafd3..0000000000 --- a/api/core/model_runtime/model_providers/minimax/text_embedding/embo-01.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embo-01 -model_type: text-embedding -model_properties: - context_size: 4096 - max_chunks: 1 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/mistralai/__init__.py b/api/core/model_runtime/model_providers/mistralai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/mistralai/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/mistralai/_assets/icon_l_en.png deleted file mode 100644 index f019b1edce..0000000000 Binary files a/api/core/model_runtime/model_providers/mistralai/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/mistralai/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/mistralai/_assets/icon_s_en.png deleted file mode 100644 index de27b57512..0000000000 Binary files a/api/core/model_runtime/model_providers/mistralai/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml b/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml deleted file mode 100644 index bdb06b7fff..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/_position.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- pixtral-12b-2409 -- codestral-latest -- mistral-embed -- open-mistral-nemo -- open-codestral-mamba -- open-mistral-7b -- open-mixtral-8x7b -- open-mixtral-8x22b -- mistral-small-latest -- 
mistral-medium-latest -- mistral-large-latest diff --git a/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml b/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml deleted file mode 100644 index 5f1260233f..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/codestral-latest.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: codestral-latest -label: - zh_Hans: codestral-latest - en_US: codestral-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4096 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/llm.py b/api/core/model_runtime/model_providers/mistralai/llm/llm.py deleted file mode 100644 index da60bd7661..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/llm.py +++ /dev/null @@ -1,36 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class MistralAILargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - - # mistral dose not support user/stop arguments - stop = [] - user = None - - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - @staticmethod - def _add_custom_parameters(credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["endpoint_url"] = "https://api.mistral.ai/v1" diff --git a/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml b/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml deleted file mode 100644 index d759103d08..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/mistral-embed.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: mistral-embed -label: - zh_Hans: mistral-embed - en_US: mistral-embed -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: 
top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/mistral-large-latest.yaml b/api/core/model_runtime/model_providers/mistralai/llm/mistral-large-latest.yaml deleted file mode 100644 index a0d07a2bf8..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/mistral-large-latest.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: mistral-large-latest -label: - zh_Hans: mistral-large-latest - en_US: mistral-large-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/mistral-medium-latest.yaml b/api/core/model_runtime/model_providers/mistralai/llm/mistral-medium-latest.yaml deleted file mode 100644 index 7c7440894c..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/mistral-medium-latest.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: mistral-medium-latest -label: - zh_Hans: mistral-medium-latest - en_US: mistral-medium-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
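Every Mistral model YAML in this diff repeats the same `safe_prompt` and `random_seed` rules, and the deleted `llm.py` above routes the provider through the OpenAI-compatible layer pointed at `https://api.mistral.ai/v1`. A sketch of the request body those two fields end up in (both are documented Mistral chat-completion parameters; the values here are illustrative):

```python
import json

payload = {
    "model": "mistral-small-latest",
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 0.7,
    "max_tokens": 1024,
    "safe_prompt": True,   # inject Mistral's safety preamble before the conversation
    "random_seed": 42,     # fixed seed for deterministic sampling across calls
}
print(json.dumps(payload, indent=2))
```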
- zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.0027' - output: '0.0081' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/mistral-small-latest.yaml b/api/core/model_runtime/model_providers/mistralai/llm/mistral-small-latest.yaml deleted file mode 100644 index 865e610226..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/mistral-small-latest.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: mistral-small-latest -label: - zh_Hans: mistral-small-latest - en_US: mistral-small-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml deleted file mode 100644 index d7ffb9ea02..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/open-codestral-mamba.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: open-codestral-mamba -label: - zh_Hans: open-codestral-mamba - en_US: open-codestral-mamba -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 256000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 16384 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-7b.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-7b.yaml deleted file mode 100644 index ac29226959..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-7b.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: open-mistral-7b -label: - zh_Hans: open-mistral-7b - en_US: open-mistral-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 2048 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.00025' - output: '0.00025' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml deleted file mode 100644 index dcda4fbce7..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/open-mistral-nemo.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: open-mistral-nemo -label: - zh_Hans: open-mistral-nemo - en_US: open-mistral-nemo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8192 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
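The `safe_prompt` and `random_seed` parameters recur across all of these deleted Mistral definitions. At the HTTP level they are plain request-body fields; a hedged sketch of a call carrying them follows. The endpoint URL and payload shape are assumptions based on Mistral's public OpenAI-style API, not something this diff specifies.

import httpx

# Sketch only: parameter names mirror the YAML rules above
# (temperature, max_tokens, safe_prompt, random_seed); the endpoint
# URL is an assumption, not taken from this diff.
response = httpx.post(
    "https://api.mistral.ai/v1/chat/completions",
    headers={"Authorization": "Bearer YOUR_API_KEY"},  # placeholder key
    json={
        "model": "open-mistral-nemo",
        "messages": [{"role": "user", "content": "Hello"}],
        "temperature": 0.7,
        "max_tokens": 1024,
        "safe_prompt": True,   # inject the safety prompt before the conversation
        "random_seed": 42,     # fixed seed for deterministic sampling
    },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])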
- zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x22b.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x22b.yaml deleted file mode 100644 index 325fafd497..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x22b.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: open-mixtral-8x22b -label: - zh_Hans: open-mixtral-8x22b - en_US: open-mixtral-8x22b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 64000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x7b.yaml b/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x7b.yaml deleted file mode 100644 index d217e5e7e9..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/open-mixtral-8x7b.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: open-mixtral-8x7b -label: - zh_Hans: open-mixtral-8x7b - en_US: open-mixtral-8x7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.0007' - output: '0.0007' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml b/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml deleted file mode 100644 index 0b002b49ca..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/llm/pixtral-12b-2409.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: pixtral-12b-2409 -label: - zh_Hans: pixtral-12b-2409 - en_US: pixtral-12b-2409 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8192 - - name: safe_prompt - default: false - type: boolean - help: - en_US: Whether to inject a safety prompt before all conversations. - zh_Hans: 是否开启提示词审查 - label: - en_US: SafePrompt - zh_Hans: 提示词审查 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: RandomSeed - zh_Hans: 随机数种子 - default: 0 - min: 0 - max: 2147483647 -pricing: - input: '0.008' - output: '0.024' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mistralai/mistralai.py b/api/core/model_runtime/model_providers/mistralai/mistralai.py deleted file mode 100644 index 7f9db8da1c..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/mistralai.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class MistralAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="open-mistral-7b", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/mistralai/mistralai.yaml b/api/core/model_runtime/model_providers/mistralai/mistralai.yaml deleted file mode 100644 index c9b4226ea6..0000000000 --- a/api/core/model_runtime/model_providers/mistralai/mistralai.yaml +++ /dev/null @@ -1,31 +0,0 @@ -provider: mistralai -label: - en_US: MistralAI -description: - en_US: Models provided by MistralAI, such as open-mistral-7b and mistral-large-latest. 
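The `MistralAIProvider.validate_provider_credentials` implementation just deleted follows the same shape as the MixedBread, Moonshot, and Nomic validators removed later in this diff: probe one cheap model and let `CredentialsValidateFailedError` pass through untouched. A generic sketch of that shared pattern; the helper name is invented for illustration:

import logging

from core.model_runtime.errors.validate import CredentialsValidateFailedError

logger = logging.getLogger(__name__)


def validate_by_probe(model_instance, probe_model: str, credentials: dict) -> None:
    # Generic form of the deleted validators: try a single cheap model,
    # re-raise validation failures as-is, and log anything unexpected.
    try:
        model_instance.validate_credentials(model=probe_model, credentials=credentials)
    except CredentialsValidateFailedError:
        raise
    except Exception:
        logger.exception("credentials validate failed")
        raise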
- zh_Hans: MistralAI 提供的模型,例如 open-mistral-7b 和 mistral-large-latest。 -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#FFFFFF" -help: - title: - en_US: Get your API Key from MistralAI - zh_Hans: 从 MistralAI 获取 API Key - url: - en_US: https://console.mistral.ai/api-keys/ -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/mixedbread/__init__.py b/api/core/model_runtime/model_providers/mixedbread/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png deleted file mode 100644 index 2027611bd5..0000000000 Binary files a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png deleted file mode 100644 index 5c357bddbd..0000000000 Binary files a/api/core/model_runtime/model_providers/mixedbread/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/mixedbread/mixedbread.py b/api/core/model_runtime/model_providers/mixedbread/mixedbread.py deleted file mode 100644 index 3c78150e6f..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/mixedbread.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class MixedBreadProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING) - - # Use `mxbai-embed-large-v1` model for validate, - model_instance.validate_credentials(model="mxbai-embed-large-v1", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml b/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml deleted file mode 100644 index 2f43aea6ad..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/mixedbread.yaml +++ /dev/null @@ -1,31 +0,0 @@ -provider: mixedbread -label: - en_US: MixedBread -description: - en_US: Embedding and Rerank Model Supported -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#EFFDFD" -help: - title: - en_US: Get your API key from MixedBread AI - zh_Hans: 从 MixedBread 获取 API Key - url: - en_US: https://www.mixedbread.ai/ -supported_model_types: - - text-embedding - - rerank -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py b/api/core/model_runtime/model_providers/mixedbread/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml deleted file mode 100644 index beda219953..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/rerank/mxbai-rerank-large-v1-en.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: mxbai-rerank-large-v1 -model_type: rerank -model_properties: - context_size: 512 diff --git a/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py b/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py deleted file mode 100644 index bf3c12fd86..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/rerank/rerank.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Optional - -import httpx - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class MixedBreadRerankModel(RerankModel): - """ - Model class for MixedBread rerank model. 
- """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n documents to return - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - base_url = credentials.get("base_url", "https://api.mixedbread.ai/v1") - base_url = base_url.removesuffix("/") - - try: - response = httpx.post( - base_url + "/reranking", - json={"model": model, "query": query, "input": docs, "top_k": top_n, "return_input": True}, - headers={"Authorization": f"Bearer {credentials.get('api_key')}", "Content-Type": "application/json"}, - ) - response.raise_for_status() - results = response.json() - - rerank_documents = [] - for result in results["data"]: - rerank_document = RerankDocument( - index=result["index"], - text=result["input"], - score=result["score"], - ) - if score_threshold is None or result["score"] >= score_threshold: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - except httpx.HTTPStatusError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. 
Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - """ - return { - InvokeConnectionError: [httpx.ConnectError], - InvokeServerUnavailableError: [httpx.RemoteProtocolError], - InvokeRateLimitError: [], - InvokeAuthorizationError: [httpx.HTTPStatusError], - InvokeBadRequestError: [httpx.RequestError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.RERANK, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))}, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml deleted file mode 100644 index 0c3c863d06..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-2d-large-v1-en.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: mxbai-embed-2d-large-v1 -model_type: text-embedding -model_properties: - context_size: 512 -pricing: - input: '0.0001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml b/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml deleted file mode 100644 index 0c5cda2a72..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/mxbai-embed-large-v1-en.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: mxbai-embed-large-v1 -model_type: text-embedding -model_properties: - context_size: 512 -pricing: - input: '0.0001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py index e2d17e3257..1370676f0e 100644 --- a/api/core/model_runtime/model_providers/model_provider_factory.py +++ b/api/core/model_runtime/model_providers/model_provider_factory.py @@ -3,61 +3,116 @@ import os from collections.abc import Sequence from typing import Optional -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel -from core.helper.module_import_helper import load_single_subclass_from_source from core.helper.position_helper import get_provider_position_map, sort_to_dict_by_position_map -from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.entities.model_entities import AIModelEntity, ModelType from core.model_runtime.entities.provider_entities import ProviderConfig, ProviderEntity, SimpleProviderEntity -from core.model_runtime.model_providers.__base.model_provider import ModelProvider +from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel +from core.model_runtime.model_providers.__base.moderation_model import ModerationModel +from 
core.model_runtime.model_providers.__base.rerank_model import RerankModel +from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel +from core.model_runtime.model_providers.__base.text2img_model import Text2ImageModel +from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel +from core.model_runtime.model_providers.__base.tts_model import TTSModel from core.model_runtime.schema_validators.model_credential_schema_validator import ModelCredentialSchemaValidator from core.model_runtime.schema_validators.provider_credential_schema_validator import ProviderCredentialSchemaValidator +from core.plugin.entities.plugin_daemon import PluginModelProviderEntity +from core.plugin.manager.asset import PluginAssetManager +from core.plugin.manager.model import PluginModelManager logger = logging.getLogger(__name__) class ModelProviderExtension(BaseModel): - model_config = ConfigDict(arbitrary_types_allowed=True) - - provider_instance: ModelProvider - name: str + plugin_model_provider_entity: PluginModelProviderEntity position: Optional[int] = None class ModelProviderFactory: - model_provider_extensions: Optional[dict[str, ModelProviderExtension]] = None + provider_position_map: dict[str, int] = {} + + def __init__(self, tenant_id: str) -> None: + self.tenant_id = tenant_id + self.plugin_model_manager = PluginModelManager() - def __init__(self) -> None: - # for cache in memory - self.get_providers() + if not self.provider_position_map: + # get the path of current classes + current_path = os.path.abspath(__file__) + model_providers_path = os.path.dirname(current_path) + + # get _position.yaml file path + self.provider_position_map = get_provider_position_map(model_providers_path) def get_providers(self) -> Sequence[ProviderEntity]: """ Get all providers :return: list of providers """ - # scan all providers - model_provider_extensions = self._get_model_provider_map() + # Fetch plugin model providers + plugin_providers = self.get_plugin_model_providers() - # traverse all model_provider_extensions - providers = [] - for model_provider_extension in model_provider_extensions.values(): - # get model_provider instance - model_provider_instance = model_provider_extension.provider_instance + # Convert PluginModelProviderEntity to ModelProviderExtension + model_provider_extensions = [] + for provider in plugin_providers: + model_provider_extensions.append(ModelProviderExtension(plugin_model_provider_entity=provider)) - # get provider schema - provider_schema = model_provider_instance.get_provider_schema() + sorted_extensions = sort_to_dict_by_position_map( + position_map=self.provider_position_map, + data=model_provider_extensions, + name_func=lambda x: x.plugin_model_provider_entity.declaration.provider, + ) - for model_type in provider_schema.supported_model_types: - # get predefined models for given model type - models = model_provider_instance.models(model_type) - if models: - provider_schema.models.extend(models) + return [extension.plugin_model_provider_entity.declaration for extension in sorted_extensions.values()] - providers.append(provider_schema) + def get_plugin_model_providers(self) -> Sequence[PluginModelProviderEntity]: + """ + Get all plugin model providers + :return: list of plugin model providers + """ + # Fetch plugin model providers + plugin_providers = self.plugin_model_manager.fetch_model_providers(self.tenant_id) - # return providers - return providers + for provider in plugin_providers: + provider.declaration.provider = 
provider.plugin_id + "/" + provider.declaration.provider + + return plugin_providers + + def get_provider_schema(self, provider: str) -> ProviderEntity: + """ + Get provider schema + :param provider: provider name + :return: provider schema + """ + plugin_model_provider_entity = self.get_plugin_model_provider(provider=provider) + return plugin_model_provider_entity.declaration + + def get_plugin_model_provider(self, provider: str) -> PluginModelProviderEntity: + """ + Get plugin model provider + :param provider: provider name + :return: provider schema + """ + # fetch plugin model providers + plugin_model_provider_entities = self.get_plugin_model_providers() + + plugin_id, provider_name = self.get_plugin_id_and_provider_name_from_provider(provider) + + # get the provider + plugin_model_provider_entity = next( + ( + p + for p in plugin_model_provider_entities + if p.declaration.provider == provider_name and (plugin_id and p.plugin_id == plugin_id) + ), + None, + ) + + if not plugin_model_provider_entity: + raise ValueError(f"Invalid provider: {provider}") + + return plugin_model_provider_entity def provider_credentials_validate(self, *, provider: str, credentials: dict) -> dict: """ @@ -67,15 +122,11 @@ class ModelProviderFactory: :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. :return: """ - # get the provider instance - model_provider_instance = self.get_provider_instance(provider) - - # get provider schema - provider_schema = model_provider_instance.get_provider_schema() + # fetch plugin model provider + plugin_model_provider_entity = self.get_plugin_model_provider(provider=provider) # get provider_credential_schema and validate credentials according to the rules - provider_credential_schema = provider_schema.provider_credential_schema - + provider_credential_schema = plugin_model_provider_entity.declaration.provider_credential_schema if not provider_credential_schema: raise ValueError(f"Provider {provider} does not have provider_credential_schema") @@ -84,7 +135,13 @@ class ModelProviderFactory: filtered_credentials = validator.validate_and_filter(credentials) # validate the credentials, raise exception if validation failed - model_provider_instance.validate_provider_credentials(filtered_credentials) + self.plugin_model_manager.validate_provider_credentials( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=plugin_model_provider_entity.plugin_id, + provider=provider, + credentials=filtered_credentials, + ) return filtered_credentials @@ -100,15 +157,11 @@ class ModelProviderFactory: :param credentials: model credentials, credentials form defined in `model_credential_schema`. 
:return: """ - # get the provider instance - model_provider_instance = self.get_provider_instance(provider) - - # get provider schema - provider_schema = model_provider_instance.get_provider_schema() + # fetch plugin model provider + plugin_model_provider_entity = self.get_plugin_model_provider(provider=provider) # get model_credential_schema and validate credentials according to the rules - model_credential_schema = provider_schema.model_credential_schema - + model_credential_schema = plugin_model_provider_entity.declaration.model_credential_schema if not model_credential_schema: raise ValueError(f"Provider {provider} does not have model_credential_schema") @@ -116,14 +169,38 @@ class ModelProviderFactory: validator = ModelCredentialSchemaValidator(model_type, model_credential_schema) filtered_credentials = validator.validate_and_filter(credentials) - # get model instance of the model type - model_instance = model_provider_instance.get_model_instance(model_type) - # call validate_credentials method of model type to validate credentials, raise exception if validation failed - model_instance.validate_credentials(model, filtered_credentials) + self.plugin_model_manager.validate_model_credentials( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=plugin_model_provider_entity.plugin_id, + provider=provider, + model_type=model_type.value, + model=model, + credentials=filtered_credentials, + ) return filtered_credentials + def get_model_schema( + self, *, provider: str, model_type: ModelType, model: str, credentials: dict + ) -> AIModelEntity | None: + """ + Get model schema + """ + plugin_id, provider_name = self.get_plugin_id_and_provider_name_from_provider(provider) + model_schema = self.plugin_model_manager.get_model_schema( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=plugin_id, + provider=provider_name, + model_type=model_type.value, + model=model, + credentials=credentials, + ) + + return model_schema + def get_models( self, *, @@ -142,7 +219,7 @@ class ModelProviderFactory: provider_configs = provider_configs or [] # scan all providers - model_provider_extensions = self._get_model_provider_map() + plugin_model_provider_entities = self.get_plugin_model_providers() # convert provider_configs to dict provider_credentials_dict = {} @@ -151,16 +228,13 @@ class ModelProviderFactory: # traverse all model_provider_extensions providers = [] - for name, model_provider_extension in model_provider_extensions.items(): + for plugin_model_provider_entity in plugin_model_provider_entities: # filter by provider if provider is present - if provider and name != provider: + if provider and plugin_model_provider_entity.declaration.provider != provider: continue - # get model_provider instance - model_provider_instance = model_provider_extension.provider_instance - # get provider schema - provider_schema = model_provider_instance.get_provider_schema() + provider_schema = plugin_model_provider_entity.declaration model_types = provider_schema.supported_model_types if model_type: @@ -170,13 +244,11 @@ class ModelProviderFactory: model_types = [model_type] all_model_type_models = [] - for model_type in model_types: - # get predefined models for given model type - models = model_provider_instance.models( - model_type=model_type, - ) + for model_schema in provider_schema.models: + if model_schema.model_type != model_type: + continue - all_model_type_models.extend(models) + all_model_type_models.append(model_schema) simple_provider_schema = provider_schema.to_simple_provider() 
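With the factory now constructed per tenant, a model schema lookup goes through the plugin daemon rather than a locally loaded provider class. A minimal usage sketch of the new `get_model_schema` surface; the tenant id, provider id, model name, and credential key are all placeholders:

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory

factory = ModelProviderFactory(tenant_id="tenant-id-placeholder")
schema = factory.get_model_schema(
    provider="langgenius/openai",              # hypothetical namespaced provider id
    model_type=ModelType.LLM,
    model="gpt-4o",                            # hypothetical model name
    credentials={"openai_api_key": "sk-..."},  # placeholder credentials
)
if schema:
    print(schema.model, schema.model_properties)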
simple_provider_schema.models.extend(all_model_type_models) @@ -185,95 +257,82 @@ class ModelProviderFactory: return providers - def get_provider_instance(self, provider: str) -> ModelProvider: + def get_model_type_instance(self, provider: str, model_type: ModelType) -> AIModel: """ - Get provider instance by provider name + Get model type instance by provider name and model type :param provider: provider name - :return: provider instance + :param model_type: model type + :return: model type instance """ - # scan all providers - model_provider_extensions = self._get_model_provider_map() - - # get the provider extension - model_provider_extension = model_provider_extensions.get(provider) - if not model_provider_extension: - raise Exception(f"Invalid provider: {provider}") - - # get the provider instance - model_provider_instance = model_provider_extension.provider_instance - - return model_provider_instance - - def _get_model_provider_map(self) -> dict[str, ModelProviderExtension]: + plugin_id, provider_name = self.get_plugin_id_and_provider_name_from_provider(provider) + init_params = { + "tenant_id": self.tenant_id, + "plugin_id": plugin_id, + "provider_name": provider_name, + "plugin_model_provider": self.get_plugin_model_provider(provider), + } + + if model_type == ModelType.LLM: + return LargeLanguageModel(**init_params) + elif model_type == ModelType.TEXT_EMBEDDING: + return TextEmbeddingModel(**init_params) + elif model_type == ModelType.RERANK: + return RerankModel(**init_params) + elif model_type == ModelType.SPEECH2TEXT: + return Speech2TextModel(**init_params) + elif model_type == ModelType.MODERATION: + return ModerationModel(**init_params) + elif model_type == ModelType.TTS: + return TTSModel(**init_params) + elif model_type == ModelType.TEXT2IMG: + return Text2ImageModel(**init_params) + + def get_provider_icon(self, provider: str, icon_type: str, lang: str) -> bytes: """ - Retrieves the model provider map. - - This method retrieves the model provider map, which is a dictionary containing the model provider names as keys - and instances of `ModelProviderExtension` as values. The model provider map is used to store information about - available model providers. - - Returns: - A dictionary containing the model provider map. - - Raises: - None. 
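`get_model_type_instance` above dispatches on `ModelType` through an if/elif chain with no final branch, so an unlisted type falls through and returns None. An equivalent table-driven sketch follows; raising on unknown types is a design choice of the sketch, not what the code above does, and the class names are the ones imported at the top of this module.

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.__base.moderation_model import ModerationModel
from core.model_runtime.model_providers.__base.rerank_model import RerankModel
from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
from core.model_runtime.model_providers.__base.text2img_model import Text2ImageModel
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.model_runtime.model_providers.__base.tts_model import TTSModel

_MODEL_TYPE_CLASSES = {
    ModelType.LLM: LargeLanguageModel,
    ModelType.TEXT_EMBEDDING: TextEmbeddingModel,
    ModelType.RERANK: RerankModel,
    ModelType.SPEECH2TEXT: Speech2TextModel,
    ModelType.MODERATION: ModerationModel,
    ModelType.TTS: TTSModel,
    ModelType.TEXT2IMG: Text2ImageModel,
}


def make_model_instance(model_type: ModelType, **init_params):
    try:
        return _MODEL_TYPE_CLASSES[model_type](**init_params)
    except KeyError:
        raise ValueError(f"Unsupported model type: {model_type}") from None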
+ Get provider icon + :param provider: provider name + :param icon_type: icon type (icon_small or icon_large) + :param lang: language (zh_Hans or en_US) + :return: provider icon """ - if self.model_provider_extensions: - return self.model_provider_extensions - - # get the path of current classes - current_path = os.path.abspath(__file__) - model_providers_path = os.path.dirname(current_path) - - # get all folders path under model_providers_path that do not start with __ - model_provider_dir_paths = [ - os.path.join(model_providers_path, model_provider_dir) - for model_provider_dir in os.listdir(model_providers_path) - if not model_provider_dir.startswith("__") - and os.path.isdir(os.path.join(model_providers_path, model_provider_dir)) - ] - - # get _position.yaml file path - position_map = get_provider_position_map(model_providers_path) - - # traverse all model_provider_dir_paths - model_providers: list[ModelProviderExtension] = [] - for model_provider_dir_path in model_provider_dir_paths: - # get model_provider dir name - model_provider_name = os.path.basename(model_provider_dir_path) - - file_names = os.listdir(model_provider_dir_path) - - if (model_provider_name + ".py") not in file_names: - logger.warning(f"Missing {model_provider_name}.py file in {model_provider_dir_path}, Skip.") - continue - - # Dynamic loading {model_provider_name}.py file and find the subclass of ModelProvider - py_path = os.path.join(model_provider_dir_path, model_provider_name + ".py") - model_provider_class = load_single_subclass_from_source( - module_name=f"core.model_runtime.model_providers.{model_provider_name}.{model_provider_name}", - script_path=py_path, - parent_type=ModelProvider, - ) - - if not model_provider_class: - logger.warning(f"Missing Model Provider Class that extends ModelProvider in {py_path}, Skip.") - continue - - if f"{model_provider_name}.yaml" not in file_names: - logger.warning(f"Missing {model_provider_name}.yaml file in {model_provider_dir_path}, Skip.") - continue - - model_providers.append( - ModelProviderExtension( - name=model_provider_name, - provider_instance=model_provider_class(), - position=position_map.get(model_provider_name), - ) - ) - - sorted_extensions = sort_to_dict_by_position_map(position_map, model_providers, lambda x: x.name) - - self.model_provider_extensions = sorted_extensions - - return sorted_extensions + # get the provider schema + provider_schema = self.get_provider_schema(provider) + + if icon_type.lower() == "icon_small": + if not provider_schema.icon_small: + raise ValueError(f"Provider {provider} does not have small icon.") + + if lang.lower() == "zh_hans": + file_name = provider_schema.icon_small.zh_Hans + else: + file_name = provider_schema.icon_small.en_US + else: + if not provider_schema.icon_large: + raise ValueError(f"Provider {provider} does not have large icon.") + + if lang.lower() == "zh_hans": + file_name = provider_schema.icon_large.zh_Hans + else: + file_name = provider_schema.icon_large.en_US + + if not file_name: + raise ValueError(f"Provider {provider} does not have icon.") + + # get icon bytes from plugin asset manager + plugin_asset_manager = PluginAssetManager() + return plugin_asset_manager.fetch_asset(tenant_id=self.tenant_id, id=file_name) + + def get_plugin_id_and_provider_name_from_provider(self, provider: str) -> tuple[str, str]: + """ + Get plugin id and provider name from provider name + :param provider: provider name + :return: plugin id and provider name + """ + plugin_id = "langgenius" + provider_name = provider + if "/" in 
provider: + # get the plugin_id before provider + plugin_id = "/".join(provider.split("/")[:-1]) + provider_name = provider.split("/")[-1] + + return plugin_id, provider_name diff --git a/api/core/model_runtime/model_providers/moonshot/__init__.py b/api/core/model_runtime/model_providers/moonshot/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/moonshot/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/moonshot/_assets/icon_l_en.png deleted file mode 100644 index a411526d3d..0000000000 Binary files a/api/core/model_runtime/model_providers/moonshot/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/moonshot/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/moonshot/_assets/icon_s_en.png deleted file mode 100644 index 58ba4b4623..0000000000 Binary files a/api/core/model_runtime/model_providers/moonshot/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/moonshot/llm/__init__.py b/api/core/model_runtime/model_providers/moonshot/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/moonshot/llm/_position.yaml b/api/core/model_runtime/model_providers/moonshot/llm/_position.yaml deleted file mode 100644 index 1810ec61d6..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/llm/_position.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- moonshot-v1-8k -- moonshot-v1-32k -- moonshot-v1-128k diff --git a/api/core/model_runtime/model_providers/moonshot/llm/llm.py b/api/core/model_runtime/model_providers/moonshot/llm/llm.py deleted file mode 100644 index 3ea46c2967..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/llm/llm.py +++ /dev/null @@ -1,327 +0,0 @@ -import json -from collections.abc import Generator -from typing import Optional, Union, cast - -import requests - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContent, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class MoonshotLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - self._add_function_call(model, credentials) - user = user[:32] if user else None - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | 
None: - return AIModelEntity( - model=model, - label=I18nObject(en_US=model, zh_Hans=model), - model_type=ModelType.LLM, - features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL] - if credentials.get("function_calling_type") == "tool_call" - else [], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 4096)), - ModelPropertyKey.MODE: LLMMode.CHAT.value, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - use_template="temperature", - label=I18nObject(en_US="Temperature", zh_Hans="温度"), - type=ParameterType.FLOAT, - ), - ParameterRule( - name="max_tokens", - use_template="max_tokens", - default=512, - min=1, - max=int(credentials.get("max_tokens", 4096)), - label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"), - type=ParameterType.INT, - ), - ParameterRule( - name="top_p", - use_template="top_p", - label=I18nObject(en_US="Top P", zh_Hans="Top P"), - type=ParameterType.FLOAT, - ), - ], - ) - - def _add_custom_parameters(self, credentials: dict) -> None: - credentials["mode"] = "chat" - if "endpoint_url" not in credentials or credentials["endpoint_url"] == "": - credentials["endpoint_url"] = "https://api.moonshot.cn/v1" - - def _add_function_call(self, model: str, credentials: dict) -> None: - model_schema = self.get_model_schema(model, credentials) - if model_schema and {ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL}.intersection( - model_schema.features or [] - ): - credentials["function_calling_type"] = "tool_call" - - def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict: - """ - Convert PromptMessage to dict for OpenAI API format - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(PromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - message_dict["tool_calls"] = [] - for function_call in message.tool_calls: - message_dict["tool_calls"].append( - { - "id": function_call.id, - "type": function_call.type, - "function": { - "name": function_call.function.name, - "arguments": function_call.function.arguments, - }, - } - ) - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = {"role": "tool", "content": message.content, "tool_call_id": message.tool_call_id} - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - else: - raise ValueError(f"Got unknown type {message}") - - if message.name: - 
message_dict["name"] = message.name - - return message_dict - - def _extract_response_tool_calls(self, response_tool_calls: list[dict]) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call["function"]["name"] - if response_tool_call.get("function", {}).get("name") - else "", - arguments=response_tool_call["function"]["arguments"] - if response_tool_call.get("function", {}).get("arguments") - else "", - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call["id"] if response_tool_call.get("id") else "", - type=response_tool_call["type"] if response_tool_call.get("type") else "", - function=function, - ) - tool_calls.append(tool_call) - - return tool_calls - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: model credentials - :param response: streamed response - :param prompt_messages: prompt messages - :return: llm response chunk generator - """ - full_assistant_content = "" - chunk_index = 0 - - def create_final_llm_result_chunk( - index: int, message: AssistantPromptMessage, finish_reason: str - ) -> LLMResultChunk: - # calculate num tokens - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - completion_tokens = self._num_tokens_from_string(model, full_assistant_content) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - return LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage), - ) - - tools_calls: list[AssistantPromptMessage.ToolCall] = [] - finish_reason = "Unknown" - - def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]): - def get_tool_call(tool_name: str): - if not tool_name: - return tools_calls[-1] - - tool_call = next((tool_call for tool_call in tools_calls if tool_call.function.name == tool_name), None) - if tool_call is None: - tool_call = AssistantPromptMessage.ToolCall( - id="", - type="", - function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tool_name, arguments=""), - ) - tools_calls.append(tool_call) - - return tool_call - - for new_tool_call in new_tool_calls: - # get tool call - tool_call = get_tool_call(new_tool_call.function.name) - # update tool call - if new_tool_call.id: - tool_call.id = new_tool_call.id - if new_tool_call.type: - tool_call.type = new_tool_call.type - if new_tool_call.function.name: - tool_call.function.name = new_tool_call.function.name - if new_tool_call.function.arguments: - tool_call.function.arguments += new_tool_call.function.arguments - - for chunk in response.iter_lines(decode_unicode=True, delimiter="\n\n"): - if chunk: - # ignore sse comments - if chunk.startswith(":"): - continue - decoded_chunk = chunk.strip().lstrip("data: ").lstrip() - chunk_json = None - try: - chunk_json = json.loads(decoded_chunk) - # stream ended - except json.JSONDecodeError as e: - yield create_final_llm_result_chunk( - index=chunk_index + 1, - message=AssistantPromptMessage(content=""), - 
finish_reason="Non-JSON encountered.", - ) - break - if not chunk_json or len(chunk_json["choices"]) == 0: - continue - - choice = chunk_json["choices"][0] - finish_reason = chunk_json["choices"][0].get("finish_reason") - chunk_index += 1 - - if "delta" in choice: - delta = choice["delta"] - delta_content = delta.get("content") - - assistant_message_tool_calls = delta.get("tool_calls", None) - # assistant_message_function_call = delta.delta.function_call - - # extract tool calls from response - if assistant_message_tool_calls: - tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - increase_tool_call(tool_calls) - - if delta_content is None or delta_content == "": - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta_content, tool_calls=tool_calls if assistant_message_tool_calls else [] - ) - - full_assistant_content += delta_content - elif "text" in choice: - choice_text = choice.get("text", "") - if choice_text == "": - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=choice_text) - full_assistant_content += choice_text - else: - continue - - # check payload indicator for completion - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=assistant_prompt_message, - ), - ) - - chunk_index += 1 - - if tools_calls: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=AssistantPromptMessage(tool_calls=tools_calls, content=""), - ), - ) - - yield create_final_llm_result_chunk( - index=chunk_index, message=AssistantPromptMessage(content=""), finish_reason=finish_reason - ) diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml deleted file mode 100644 index 59c0915ee9..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-128k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: moonshot-v1-128k -label: - zh_Hans: moonshot-v1-128k - en_US: moonshot-v1-128k -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 128000 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.06' - output: '0.06' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml deleted file mode 100644 index 724f2aa5a2..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-32k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: moonshot-v1-32k -label: - zh_Hans: moonshot-v1-32k - en_US: moonshot-v1-32k -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - 
use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 32000 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.024' - output: '0.024' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml b/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml deleted file mode 100644 index 5872295bfa..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/llm/moonshot-v1-8k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: moonshot-v1-8k -label: - zh_Hans: moonshot-v1-8k - en_US: moonshot-v1-8k -model_type: llm -features: - - agent-thought - - tool-call - - multi-tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.012' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/moonshot/moonshot.py b/api/core/model_runtime/model_providers/moonshot/moonshot.py deleted file mode 100644 index 4995e235f5..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/moonshot.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class MoonshotProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="moonshot-v1-8k", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/moonshot/moonshot.yaml b/api/core/model_runtime/model_providers/moonshot/moonshot.yaml deleted file mode 100644 index 41e9c2e808..0000000000 --- a/api/core/model_runtime/model_providers/moonshot/moonshot.yaml +++ /dev/null @@ -1,89 +0,0 @@ -provider: moonshot -label: - zh_Hans: 月之暗面 - en_US: Moonshot -description: - en_US: Models provided by Moonshot, such as moonshot-v1-8k, moonshot-v1-32k, and moonshot-v1-128k. 
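The deleted Moonshot model files all expose a `response_format` option (`text` or `json_object`), and the removed `_add_custom_parameters` above hard-codes `https://api.moonshot.cn/v1` as the default endpoint for the OpenAI-compatible base class. Requesting JSON output would therefore look roughly like the sketch below; the `/chat/completions` path and payload shape follow the OpenAI-compatible convention and are assumptions, not something this diff states.

import httpx

response = httpx.post(
    "https://api.moonshot.cn/v1/chat/completions",  # default endpoint from the deleted code
    headers={"Authorization": "Bearer YOUR_API_KEY"},  # placeholder key
    json={
        "model": "moonshot-v1-8k",
        "messages": [{"role": "user", "content": "Reply with a JSON greeting."}],
        "response_format": {"type": "json_object"},  # maps to the YAML option above
        "max_tokens": 512,
    },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])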
- zh_Hans: Moonshot 提供的模型,例如 moonshot-v1-8k、moonshot-v1-32k 和 moonshot-v1-128k。 -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.png -background: "#FFFFFF" -help: - title: - en_US: Get your API Key from Moonshot - zh_Hans: 从 Moonshot 获取 API Key - url: - en_US: https://platform.moonshot.cn/console/api-keys -supported_model_types: - - llm -configurate_methods: - - predefined-model - - customizable-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: endpoint_url - label: - en_US: API Base - type: text-input - required: false - placeholder: - zh_Hans: Base URL, 如:https://api.moonshot.cn/v1 - en_US: Base URL, e.g. https://api.moonshot.cn/v1 -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - default: '4096' - type: text-input - - variable: function_calling_type - label: - en_US: Function calling - type: select - required: false - default: no_call - options: - - value: no_call - label: - en_US: Not supported - zh_Hans: 不支持 - - value: tool_call - label: - en_US: Tool Call - zh_Hans: Tool Call diff --git a/api/core/model_runtime/model_providers/nomic/__init__.py b/api/core/model_runtime/model_providers/nomic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg deleted file mode 100644 index 6c4a1058ab..0000000000 --- a/api/core/model_runtime/model_providers/nomic/_assets/icon_l_en.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png deleted file mode 100644 index 3eba3b82bc..0000000000 Binary files a/api/core/model_runtime/model_providers/nomic/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/nomic/_common.py b/api/core/model_runtime/model_providers/nomic/_common.py deleted file mode 100644 index 406577dcd7..0000000000 --- a/api/core/model_runtime/model_providers/nomic/_common.py +++ /dev/null @@ -1,28 +0,0 @@ -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonNomic: - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError, InvokeBadRequestError], - } diff --git a/api/core/model_runtime/model_providers/nomic/nomic.py b/api/core/model_runtime/model_providers/nomic/nomic.py deleted file mode 100644 index d4e5da2e98..0000000000 --- a/api/core/model_runtime/model_providers/nomic/nomic.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class NomicAtlasProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.TEXT_EMBEDDING) - model_instance.validate_credentials(model="nomic-embed-text-v1.5", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/nomic/nomic.yaml b/api/core/model_runtime/model_providers/nomic/nomic.yaml deleted file mode 100644 index 60dcf1facb..0000000000 --- a/api/core/model_runtime/model_providers/nomic/nomic.yaml +++ /dev/null @@ -1,29 +0,0 @@ -provider: nomic -label: - zh_Hans: Nomic Atlas - en_US: Nomic Atlas -icon_small: - en_US: icon_s_en.png -icon_large: - en_US: icon_l_en.svg -background: "#EFF1FE" -help: - title: - en_US: Get your API key from Nomic Atlas - zh_Hans: 从Nomic Atlas获取 API Key - url: - en_US: https://atlas.nomic.ai/data -supported_model_types: - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: nomic_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/__init__.py b/api/core/model_runtime/model_providers/nomic/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml deleted file mode 100644 index 111452df57..0000000000 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.5.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: nomic-embed-text-v1.5 -model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: "0.1" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml b/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml deleted file mode 100644 index ac59f106ed..0000000000 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/nomic-embed-text-v1.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: nomic-embed-text-v1 
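The `_invoke_error_mapping` tables deleted here (Nomic above, and the same property on the other removed providers) declare, per unified `InvokeError` subclass, which concrete exceptions should be folded into it. A small illustrative resolver for such a table; the function is a sketch, not the runtime's actual implementation:

from core.model_runtime.errors.invoke import InvokeError


def resolve_invoke_error(error: Exception, mapping: dict[type, list[type]]) -> type:
    # Walk an _invoke_error_mapping table and return the unified error type
    # whose listed concrete exception classes match `error`.
    for unified_type, concrete_types in mapping.items():
        if concrete_types and isinstance(error, tuple(concrete_types)):
            return unified_type
    return InvokeError  # generic fallback when nothing matches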
-model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: "0.1" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/novita/_assets/icon_l_en.svg deleted file mode 100644 index 5c92cdbc6d..0000000000 --- a/api/core/model_runtime/model_providers/novita/_assets/icon_l_en.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/novita/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/novita/_assets/icon_s_en.svg deleted file mode 100644 index 798c1d6348..0000000000 --- a/api/core/model_runtime/model_providers/novita/_assets/icon_s_en.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/novita/llm/Nous-Hermes-2-Mixtral-8x7B-DPO.yaml b/api/core/model_runtime/model_providers/novita/llm/Nous-Hermes-2-Mixtral-8x7B-DPO.yaml deleted file mode 100644 index 7ff30458e2..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/Nous-Hermes-2-Mixtral-8x7B-DPO.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: Nous-Hermes-2-Mixtral-8x7B-DPO -label: - zh_Hans: Nous-Hermes-2-Mixtral-8x7B-DPO - en_US: Nous-Hermes-2-Mixtral-8x7B-DPO -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0027' - output: '0.0027' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/airoboros-l2-70b.yaml b/api/core/model_runtime/model_providers/novita/llm/airoboros-l2-70b.yaml deleted file mode 100644 index b599418461..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/airoboros-l2-70b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: jondurbin/airoboros-l2-70b -label: - zh_Hans: jondurbin/airoboros-l2-70b - en_US: jondurbin/airoboros-l2-70b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.005' - output: '0.005' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/dolphin-mixtral-8x22b.yaml b/api/core/model_runtime/model_providers/novita/llm/dolphin-mixtral-8x22b.yaml deleted file mode 100644 index 72a181f5d3..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/dolphin-mixtral-8x22b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: cognitivecomputations/dolphin-mixtral-8x22b -label: - zh_Hans: cognitivecomputations/dolphin-mixtral-8x22b - en_US: cognitivecomputations/dolphin-mixtral-8x22b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat 
- context_size: 16000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.009' - output: '0.009' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/gemma-2-9b-it.yaml b/api/core/model_runtime/model_providers/novita/llm/gemma-2-9b-it.yaml deleted file mode 100644 index d1749bc882..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/gemma-2-9b-it.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: google/gemma-2-9b-it -label: - zh_Hans: google/gemma-2-9b-it - en_US: google/gemma-2-9b-it -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0008' - output: '0.0008' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/hermes-2-pro-llama-3-8b.yaml b/api/core/model_runtime/model_providers/novita/llm/hermes-2-pro-llama-3-8b.yaml deleted file mode 100644 index 8b3228e56a..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/hermes-2-pro-llama-3-8b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: nousresearch/hermes-2-pro-llama-3-8b -label: - zh_Hans: nousresearch/hermes-2-pro-llama-3-8b - en_US: nousresearch/hermes-2-pro-llama-3-8b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0014' - output: '0.0014' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/l3-70b-euryale-v2.1.yaml b/api/core/model_runtime/model_providers/novita/llm/l3-70b-euryale-v2.1.yaml deleted file mode 100644 index 5e27941c52..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/l3-70b-euryale-v2.1.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: sao10k/l3-70b-euryale-v2.1 -label: - zh_Hans: sao10k/l3-70b-euryale-v2.1 - en_US: sao10k/l3-70b-euryale-v2.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 
- default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0148' - output: '0.0148' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llama-3-70b-instruct.yaml b/api/core/model_runtime/model_providers/novita/llm/llama-3-70b-instruct.yaml deleted file mode 100644 index 39709e1063..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llama-3-70b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: meta-llama/llama-3-70b-instruct -label: - zh_Hans: meta-llama/llama-3-70b-instruct - en_US: meta-llama/llama-3-70b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0051' - output: '0.0074' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llama-3-8b-instruct.yaml b/api/core/model_runtime/model_providers/novita/llm/llama-3-8b-instruct.yaml deleted file mode 100644 index 9b5e5df4d0..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llama-3-8b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: meta-llama/llama-3-8b-instruct -label: - zh_Hans: meta-llama/llama-3-8b-instruct - en_US: meta-llama/llama-3-8b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.00063' - output: '0.00063' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-405b-instruct.yaml b/api/core/model_runtime/model_providers/novita/llm/llama-3.1-405b-instruct.yaml deleted file mode 100644 index c5a45271ae..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-405b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: meta-llama/llama-3.1-405b-instruct -label: - zh_Hans: meta-llama/llama-3.1-405b-instruct - en_US: meta-llama/llama-3.1-405b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.03' - output: '0.05' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-70b-instruct.yaml 
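[Reviewer note] The pricing blocks in the deleted Novita YAMLs pair listed prices with a `unit` scale. Assuming `unit` converts the listed price to a per-token figure (that semantic is not restated in this diff), the llama-3-70b-instruct entry above works out as follows:

    # illustrative arithmetic only; values copied from the YAML above
    input_price = 0.0051                  # listed input price, USD
    unit = 0.0001                         # i.e. the price applies per 10,000 tokens
    per_token = input_price * unit        # 5.1e-07 USD per input token
    per_million = per_token * 1_000_000   # ~0.51 USD per 1M input tokens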
b/api/core/model_runtime/model_providers/novita/llm/llama-3.1-70b-instruct.yaml deleted file mode 100644 index 3a5c29c40f..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-70b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: meta-llama/llama-3.1-70b-instruct -label: - zh_Hans: meta-llama/llama-3.1-70b-instruct - en_US: meta-llama/llama-3.1-70b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0055' - output: '0.0076' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-8b-instruct.yaml b/api/core/model_runtime/model_providers/novita/llm/llama-3.1-8b-instruct.yaml deleted file mode 100644 index e6ef772a3f..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llama-3.1-8b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: meta-llama/llama-3.1-8b-instruct -label: - zh_Hans: meta-llama/llama-3.1-8b-instruct - en_US: meta-llama/llama-3.1-8b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.001' - output: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/llm.py b/api/core/model_runtime/model_providers/novita/llm/llm.py deleted file mode 100644 index 23367ed1b4..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/llm.py +++ /dev/null @@ -1,69 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class NovitaLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _update_endpoint_url(self, credentials: dict): - credentials["endpoint_url"] = "https://api.novita.ai/v3/openai" - credentials["extra_headers"] = {"X-Novita-Source": "dify.ai"} - return credentials - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - return super()._invoke(model, cred_with_endpoint, prompt_messages, model_parameters, tools, stop, stream, user) - - def 
validate_credentials(self, model: str, credentials: dict) -> None: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - self._add_custom_parameters(credentials, model) - return super().validate_credentials(model, cred_with_endpoint) - - @classmethod - def _add_custom_parameters(cls, credentials: dict, model: str) -> None: - credentials["mode"] = "chat" - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - return super()._generate( - model, cred_with_endpoint, prompt_messages, model_parameters, tools, stop, stream, user - ) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super().get_customizable_model_schema(model, cred_with_endpoint) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super().get_num_tokens(model, cred_with_endpoint, prompt_messages, tools) diff --git a/api/core/model_runtime/model_providers/novita/llm/lzlv_70b.yaml b/api/core/model_runtime/model_providers/novita/llm/lzlv_70b.yaml deleted file mode 100644 index 0cc68a8c45..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/lzlv_70b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: lzlv_70b -label: - zh_Hans: lzlv_70b - en_US: lzlv_70b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0058' - output: '0.0078' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/midnight-rose-70b.yaml b/api/core/model_runtime/model_providers/novita/llm/midnight-rose-70b.yaml deleted file mode 100644 index 19876bee17..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/midnight-rose-70b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: sophosympatheia/midnight-rose-70b -label: - zh_Hans: sophosympatheia/midnight-rose-70b - en_US: sophosympatheia/midnight-rose-70b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.008' - output: '0.008' - unit: '0.0001' - currency: USD diff --git 
a/api/core/model_runtime/model_providers/novita/llm/mistral-7b-instruct.yaml b/api/core/model_runtime/model_providers/novita/llm/mistral-7b-instruct.yaml deleted file mode 100644 index 6fba47bcf0..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/mistral-7b-instruct.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: mistralai/mistral-7b-instruct -label: - zh_Hans: mistralai/mistral-7b-instruct - en_US: mistralai/mistral-7b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.00059' - output: '0.00059' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/mythomax-l2-13b.yaml b/api/core/model_runtime/model_providers/novita/llm/mythomax-l2-13b.yaml deleted file mode 100644 index 7e4ac3ffe0..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/mythomax-l2-13b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: gryphe/mythomax-l2-13b -label: - zh_Hans: gryphe/mythomax-l2-13b - en_US: gryphe/mythomax-l2-13b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.00119' - output: '0.00119' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/nous-hermes-llama2-13b.yaml b/api/core/model_runtime/model_providers/novita/llm/nous-hermes-llama2-13b.yaml deleted file mode 100644 index 75671c414c..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/nous-hermes-llama2-13b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: nousresearch/nous-hermes-llama2-13b -label: - zh_Hans: nousresearch/nous-hermes-llama2-13b - en_US: nousresearch/nous-hermes-llama2-13b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0017' - output: '0.0017' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/openhermes-2.5-mistral-7b.yaml b/api/core/model_runtime/model_providers/novita/llm/openhermes-2.5-mistral-7b.yaml deleted file mode 100644 index 8b0deba4f7..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/openhermes-2.5-mistral-7b.yaml +++ /dev/null @@ -1,41 
+0,0 @@ -model: teknium/openhermes-2.5-mistral-7b -label: - zh_Hans: teknium/openhermes-2.5-mistral-7b - en_US: teknium/openhermes-2.5-mistral-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0017' - output: '0.0017' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/llm/wizardlm-2-8x22b.yaml b/api/core/model_runtime/model_providers/novita/llm/wizardlm-2-8x22b.yaml deleted file mode 100644 index ef42568e8f..0000000000 --- a/api/core/model_runtime/model_providers/novita/llm/wizardlm-2-8x22b.yaml +++ /dev/null @@ -1,41 +0,0 @@ -model: microsoft/wizardlm-2-8x22b -label: - zh_Hans: microsoft/wizardlm-2-8x22b - en_US: microsoft/wizardlm-2-8x22b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 65535 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 2 - default: 1 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 512 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 -pricing: - input: '0.0064' - output: '0.0064' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/novita/novita.py b/api/core/model_runtime/model_providers/novita/novita.py deleted file mode 100644 index 76a75b01e2..0000000000 --- a/api/core/model_runtime/model_providers/novita/novita.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class NovitaProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
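[Reviewer note] `NovitaProvider.validate_provider_credentials` below uses the same probe pattern as `NomicAtlasProvider` earlier in this diff: validate provider-level credentials by pinging one hard-coded model. Condensed, the shared pattern looks like this; the helper is a sketch, not an actual Dify API:

    import logging

    from core.model_runtime.errors.validate import CredentialsValidateFailedError

    logger = logging.getLogger(__name__)

    def validate_by_probe(model_instance, probe_model: str, credentials: dict) -> None:
        # ping a known model; validation failures and unexpected errors both propagate
        try:
            model_instance.validate_credentials(model=probe_model, credentials=credentials)
        except CredentialsValidateFailedError:
            raise
        except Exception:
            logger.exception("credentials validation failed")
            raise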
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `meta-llama/llama-3-8b-instruct` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="meta-llama/llama-3-8b-instruct", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/novita/novita.yaml b/api/core/model_runtime/model_providers/novita/novita.yaml deleted file mode 100644 index f634197989..0000000000 --- a/api/core/model_runtime/model_providers/novita/novita.yaml +++ /dev/null @@ -1,31 +0,0 @@ -provider: novita -label: - en_US: novita.ai -description: - en_US: An LLM API that matches various application scenarios with high cost-effectiveness. - zh_Hans: 适配多种海外应用场景的高性价比 LLM API -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#eadeff" -help: - title: - en_US: Get your API key from novita.ai - zh_Hans: 从 novita.ai 获取 API Key - url: - en_US: https://novita.ai/settings#key-management?utm_source=dify&utm_medium=ch&utm_campaign=api -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - required: true - label: - en_US: API Key - type: secret-input - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/nvidia/__init__.py b/api/core/model_runtime/model_providers/nvidia/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png deleted file mode 100644 index 5a7f42e617..0000000000 Binary files a/api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg deleted file mode 100644 index 9fc02f9164..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml b/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml deleted file mode 100644 index ad01d430d6..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml +++ /dev/null @@ -1,17 +0,0 @@ -- google/gemma-7b -- google/codegemma-7b -- google/recurrentgemma-2b -- meta/llama2-70b -- meta/llama-3.1-8b-instruct -- meta/llama-3.1-70b-instruct -- meta/llama-3.1-405b-instruct -- meta/llama3-8b-instruct -- meta/llama3-70b-instruct -- mistralai/mistral-large -- mistralai/mixtral-8x7b-instruct-v0.1 -- mistralai/mixtral-8x22b-instruct-v0.1 -- nvidia/nemotron-4-340b-instruct -- microsoft/phi-3-medium-128k-instruct -- microsoft/phi-3-mini-128k-instruct -- fuyu-8b -- snowflake/arctic diff --git a/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml b/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml deleted file mode 100644 index 7f53ae58e6..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/arctic.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: snowflake/arctic -label: - zh_Hans: snowflake/arctic - en_US: snowflake/arctic -model_type: llm -features: - - 
agent-thought -model_properties: - mode: chat - context_size: 4000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/codegemma-7b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/codegemma-7b.yaml deleted file mode 100644 index 57446224a8..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/codegemma-7b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: google/codegemma-7b -label: - zh_Hans: google/codegemma-7b - en_US: google/codegemma-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml deleted file mode 100644 index 6ae524c6d8..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml +++ /dev/null @@ -1,27 +0,0 @@ -model: fuyu-8b -label: - zh_Hans: fuyu-8b - en_US: fuyu-8b -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 16000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.2 - min: 0.1 - max: 1 - - name: top_p - use_template: top_p - default: 0.7 - min: 0.1 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml deleted file mode 100644 index 794b820bf4..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: google/gemma-7b -label: - zh_Hans: google/gemma-7b - en_US: google/gemma-7b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-405b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-405b.yaml deleted file mode 100644 index 5472de9902..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-405b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama-3.1-405b-instruct -label: - zh_Hans: meta/llama-3.1-405b-instruct - en_US: meta/llama-3.1-405b-instruct -model_type: 
llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-70b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-70b.yaml deleted file mode 100644 index 16af0554a1..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-70b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama-3.1-70b-instruct -label: - zh_Hans: meta/llama-3.1-70b-instruct - en_US: meta/llama-3.1-70b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-8b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-8b.yaml deleted file mode 100644 index f2d43dc30e..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama-3.1-8b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama-3.1-8b-instruct -label: - zh_Hans: meta/llama-3.1-8b-instruct - en_US: meta/llama-3.1-8b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml deleted file mode 100644 index 9fba816b7f..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama2-70b -label: - zh_Hans: meta/llama2-70b - en_US: meta/llama2-70b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama3-70b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama3-70b.yaml deleted
file mode 100644 index 4d591d4226..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama3-70b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama3-70b-instruct -label: - zh_Hans: meta/llama3-70b-instruct - en_US: meta/llama3-70b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama3-8b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama3-8b.yaml deleted file mode 100644 index 0139566674..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llama3-8b.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: meta/llama3-8b-instruct -label: - zh_Hans: meta/llama3-8b-instruct - en_US: meta/llama3-8b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py deleted file mode 100644 index 1c98c6be6c..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py +++ /dev/null @@ -1,247 +0,0 @@ -import json -from collections.abc import Generator -from typing import Optional, Union - -import requests -from yarl import URL - -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult -from core.model_runtime.entities.message_entities import ( - PromptMessage, - PromptMessageContentType, - PromptMessageFunction, - PromptMessageTool, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import InvokeError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel -from core.model_runtime.utils import helper - - -class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel): - MODEL_SUFFIX_MAP = { - "fuyu-8b": "vlm/adept/fuyu-8b", - "mistralai/mistral-large": "", - "mistralai/mixtral-8x7b-instruct-v0.1": "", - "mistralai/mixtral-8x22b-instruct-v0.1": "", - "google/gemma-7b": "", - "google/codegemma-7b": "", - "snowflake/arctic": "", - "meta/llama2-70b": "", - "meta/llama3-8b-instruct": "", - "meta/llama3-70b-instruct": "", - "meta/llama-3.1-8b-instruct": "", - "meta/llama-3.1-70b-instruct": "", - "meta/llama-3.1-405b-instruct": "", - "google/recurrentgemma-2b": "", - "nvidia/nemotron-4-340b-instruct": "", - "microsoft/phi-3-medium-128k-instruct": "", - "microsoft/phi-3-mini-128k-instruct": "", - } - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: 
Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials, model) - prompt_messages = self._transform_prompt_messages(prompt_messages) - stop = [] - user = None - - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _transform_prompt_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Handle Image transform - """ - for i, p in enumerate(prompt_messages): - if isinstance(p, UserPromptMessage) and isinstance(p.content, list): - content = p.content - content_text = "" - for prompt_content in content: - if prompt_content.type == PromptMessageContentType.TEXT: - content_text += prompt_content.data - else: - content_text += f' <img src="{prompt_content.data}" />' - - prompt_message = UserPromptMessage(content=content_text) - prompt_messages[i] = prompt_message - return prompt_messages - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials, model) - self._validate_credentials(model, credentials) - - def _add_custom_parameters(self, credentials: dict, model: str) -> None: - credentials["mode"] = "chat" - - if self.MODEL_SUFFIX_MAP[model]: - credentials["server_url"] = f"https://ai.api.nvidia.com/v1/{self.MODEL_SUFFIX_MAP[model]}" - credentials.pop("endpoint_url") - else: - credentials["endpoint_url"] = "https://integrate.api.nvidia.com/v1" - - credentials["stream_mode_delimiter"] = "\n" - - def _validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials using requests to ensure compatibility with all providers following - OpenAI's API standard. - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - headers = {"Content-Type": "application/json"} - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials.get("endpoint_url") - if endpoint_url and not endpoint_url.endswith("/"): - endpoint_url += "/" - server_url = credentials.get("server_url") - - # prepare the payload for a simple ping to the model - data = {"model": model, "max_tokens": 5} - - completion_type = LLMMode.value_of(credentials["mode"]) - - if completion_type is LLMMode.CHAT: - data["messages"] = [ - {"role": "user", "content": "ping"}, - ] - if "endpoint_url" in credentials: - endpoint_url = str(URL(endpoint_url) / "chat" / "completions") - elif "server_url" in credentials: - endpoint_url = server_url - elif completion_type is LLMMode.COMPLETION: - data["prompt"] = "ping" - if "endpoint_url" in credentials: - endpoint_url = str(URL(endpoint_url) / "completions") - elif "server_url" in credentials: - endpoint_url = server_url - else: - raise ValueError("Unsupported completion type for model configuration.") - - # send a post request to validate the credentials - response = requests.post(endpoint_url, headers=headers, json=data, timeout=(10, 300)) - - if response.status_code != 200: - raise CredentialsValidateFailedError( - f"Credentials validation failed with status code {response.status_code}" - ) - - try: - json_result = response.json() - except json.JSONDecodeError as e: - raise CredentialsValidateFailedError("Credentials validation failed: JSON decode error") - except CredentialsValidateFailedError: - raise - except Exception as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials 
validation: {str(ex)}") - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm completion model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - headers = { - "Content-Type": "application/json", - "Accept-Charset": "utf-8", - } - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - if stream: - headers["Accept"] = "text/event-stream" - - endpoint_url = credentials.get("endpoint_url") - if endpoint_url and not endpoint_url.endswith("/"): - endpoint_url += "/" - server_url = credentials.get("server_url") - - data = {"model": model, "stream": stream, **model_parameters} - - completion_type = LLMMode.value_of(credentials["mode"]) - - if completion_type is LLMMode.CHAT: - if "endpoint_url" in credentials: - endpoint_url = str(URL(endpoint_url) / "chat" / "completions") - elif "server_url" in credentials: - endpoint_url = server_url - data["messages"] = [self._convert_prompt_message_to_dict(m, credentials) for m in prompt_messages] - elif completion_type is LLMMode.COMPLETION: - data["prompt"] = "ping" - if "endpoint_url" in credentials: - endpoint_url = str(URL(endpoint_url) / "completions") - elif "server_url" in credentials: - endpoint_url = server_url - else: - raise ValueError("Unsupported completion type for model configuration.") - - # annotate tools with names, descriptions, etc. 
- function_calling_type = credentials.get("function_calling_type", "no_call") - formatted_tools = [] - if tools: - if function_calling_type == "function_call": - data["functions"] = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} - for tool in tools - ] - elif function_calling_type == "tool_call": - data["tool_choice"] = "auto" - - for tool in tools: - formatted_tools.append(helper.dump_model(PromptMessageFunction(function=tool))) - - data["tools"] = formatted_tools - - if stop: - data["stop"] = stop - - if user: - data["user"] = user - - response = requests.post(endpoint_url, headers=headers, json=data, timeout=(10, 300), stream=stream) - - if response.encoding is None or response.encoding == "ISO-8859-1": - response.encoding = "utf-8" - - if not response.ok: - raise InvokeError(f"API request failed with status code {response.status_code}: {response.text}") - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) diff --git a/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml b/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml deleted file mode 100644 index 3e14d22141..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/mistral-large.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: mistralai/mistral-large -label: - zh_Hans: mistralai/mistral-large - en_US: mistralai/mistral-large -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/mistralai_mixtral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/nvidia/llm/mistralai_mixtral-8x7b-instruct-v0.1.yaml deleted file mode 100644 index d2c4dc5d93..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/mistralai_mixtral-8x7b-instruct-v0.1.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: mistralai/mixtral-8x7b-instruct-v0.1 -label: - zh_Hans: mistralai/mixtral-8x7b-instruct-v0.1 - en_US: mistralai/mixtral-8x7b-instruct-v0.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml deleted file mode 100644 index 05500c0336..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/mixtral-8x22b-instruct-v0.1.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: mistralai/mixtral-8x22b-instruct-v0.1 -label: - 
zh_Hans: mistralai/mixtral-8x22b-instruct-v0.1 - en_US: mistralai/mixtral-8x22b-instruct-v0.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 64000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/nemotron-4-340b-instruct.yaml b/api/core/model_runtime/model_providers/nvidia/llm/nemotron-4-340b-instruct.yaml deleted file mode 100644 index e5537cd2fd..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/nemotron-4-340b-instruct.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: nvidia/nemotron-4-340b-instruct -label: - zh_Hans: nvidia/nemotron-4-340b-instruct - en_US: nvidia/nemotron-4-340b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/phi-3-medium-128k-instruct.yaml b/api/core/model_runtime/model_providers/nvidia/llm/phi-3-medium-128k-instruct.yaml deleted file mode 100644 index 0c5538d135..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/phi-3-medium-128k-instruct.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: microsoft/phi-3-medium-128k-instruct -label: - zh_Hans: microsoft/phi-3-medium-128k-instruct - en_US: microsoft/phi-3-medium-128k-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/phi-3-mini-128k-instruct.yaml b/api/core/model_runtime/model_providers/nvidia/llm/phi-3-mini-128k-instruct.yaml deleted file mode 100644 index 1eb1c51d01..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/phi-3-mini-128k-instruct.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: microsoft/phi-3-mini-128k-instruct -label: - zh_Hans: microsoft/phi-3-mini-128k-instruct - en_US: microsoft/phi-3-mini-128k-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 - - name: 
frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml deleted file mode 100644 index 73fcce3930..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/llm/recurrentgemma-2b.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: google/recurrentgemma-2b -label: - zh_Hans: google/recurrentgemma-2b - en_US: google/recurrentgemma-2b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.2 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 0.7 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 1024 - default: 1024 - - name: random_seed - type: int - help: - en_US: The seed to use for random sampling. If set, different calls will generate deterministic results. - zh_Hans: 当开启随机数种子以后,你可以通过指定一个固定的种子来使得回答结果更加稳定 - label: - en_US: Seed - zh_Hans: 种子 - default: 0 - min: 0 - max: 2147483647 diff --git a/api/core/model_runtime/model_providers/nvidia/nvidia.py b/api/core/model_runtime/model_providers/nvidia/nvidia.py deleted file mode 100644 index 058fa00346..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/nvidia.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class NVIDIAProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="mistralai/mixtral-8x7b-instruct-v0.1", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/nvidia/nvidia.yaml b/api/core/model_runtime/model_providers/nvidia/nvidia.yaml deleted file mode 100644 index ce894a3372..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/nvidia.yaml +++ /dev/null @@ -1,33 +0,0 @@ -provider: nvidia -label: - en_US: API Catalog -description: - en_US: API Catalog - zh_Hans: API Catalog -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.png -background: "#FFFFFF" -help: - title: - en_US: Get your API Key from NVIDIA - zh_Hans: 从 NVIDIA 获取 API Key - url: - en_US: https://build.nvidia.com/explore/discover -supported_model_types: - - llm - - text-embedding - - rerank -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/nvidia/rerank/__init__.py b/api/core/model_runtime/model_providers/nvidia/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nvidia/rerank/rerank-qa-mistral-4b.yaml b/api/core/model_runtime/model_providers/nvidia/rerank/rerank-qa-mistral-4b.yaml deleted file mode 100644 index 461f4e1cbe..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/rerank/rerank-qa-mistral-4b.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: nv-rerank-qa-mistral-4b:1 -model_type: rerank -model_properties: - context_size: 512 diff --git a/api/core/model_runtime/model_providers/nvidia/rerank/rerank.py b/api/core/model_runtime/model_providers/nvidia/rerank/rerank.py deleted file mode 100644 index fabebc67ab..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/rerank/rerank.py +++ /dev/null @@ -1,121 +0,0 @@ -from math import exp -from typing import Optional - -import requests - -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class NvidiaRerankModel(RerankModel): - """ - Model class for NVIDIA rerank model. 
- """ - - def _sigmoid(self, logit: float) -> float: - return 1 / (1 + exp(-logit)) - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n documents to return - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - try: - invoke_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking" - - headers = { - "Authorization": f"Bearer {credentials.get('api_key')}", - "Accept": "application/json", - } - payload = { - "model": model, - "query": {"text": query}, - "passages": [{"text": doc} for doc in docs], - } - session = requests.Session() - response = session.post(invoke_url, headers=headers, json=payload) - response.raise_for_status() - results = response.json() - - rerank_documents = [] - for result in results["rankings"]: - index = result["index"] - logit = result["logit"] - rerank_document = RerankDocument( - index=index, - text=docs[index], - score=self._sigmoid(logit), - ) - - rerank_documents.append(rerank_document) - if rerank_documents: - rerank_documents = sorted(rerank_documents, key=lambda x: x.score, reverse=True) - if top_n: - rerank_documents = rerank_documents[:top_n] - return RerankResult(model=model, docs=rerank_documents) - except requests.HTTPError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke( - model=model, - credentials=credentials, - query="What is the GPU memory bandwidth of H100 SXM?", - docs=[ - "Example doc 1", - "Example doc 2", - "Example doc 3", - ], - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - """ - return { - InvokeConnectionError: [requests.ConnectionError], - InvokeServerUnavailableError: [requests.HTTPError], - InvokeRateLimitError: [], - InvokeAuthorizationError: [requests.HTTPError], - InvokeBadRequestError: [requests.RequestException], - } diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/__init__.py b/api/core/model_runtime/model_providers/nvidia/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/embed-qa-4.yaml b/api/core/model_runtime/model_providers/nvidia/text_embedding/embed-qa-4.yaml deleted file mode 100644 index a9b5e25c3c..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/text_embedding/embed-qa-4.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: NV-Embed-QA -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/nvidia_nim/__init__.py b/api/core/model_runtime/model_providers/nvidia_nim/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_l_en.png 
b/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_l_en.png deleted file mode 100644 index 5a7f42e617..0000000000 Binary files a/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_s_en.svg deleted file mode 100644 index 9fc02f9164..0000000000 --- a/api/core/model_runtime/model_providers/nvidia_nim/_assets/icon_s_en.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/api/core/model_runtime/model_providers/nvidia_nim/llm/__init__.py b/api/core/model_runtime/model_providers/nvidia_nim/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/nvidia_nim/llm/llm.py b/api/core/model_runtime/model_providers/nvidia_nim/llm/llm.py deleted file mode 100644 index 6ff380bdd9..0000000000 --- a/api/core/model_runtime/model_providers/nvidia_nim/llm/llm.py +++ /dev/null @@ -1,13 +0,0 @@ -import logging - -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - -logger = logging.getLogger(__name__) - - -class NVIDIANIMLargeLanguageModel(OAIAPICompatLargeLanguageModel): - """ - Model class for NVIDIA NIM large language model. - """ - - pass diff --git a/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.py b/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.py deleted file mode 100644 index ad890ada22..0000000000 --- a/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class NVIDIANIMProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.yaml b/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.yaml deleted file mode 100644 index 0e892665d7..0000000000 --- a/api/core/model_runtime/model_providers/nvidia_nim/nvidia_nim.yaml +++ /dev/null @@ -1,79 +0,0 @@ -provider: nvidia_nim -label: - en_US: NVIDIA NIM -description: - en_US: NVIDIA NIM, a set of easy-to-use inference microservices. - zh_Hans: NVIDIA NIM,一组易于使用的模型推理微服务。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.png -background: "#EFFDFD" -help: - title: - en_US: Learn more about NVIDIA NIM - zh_Hans: 了解 NVIDIA NIM 更多信息 - url: - en_US: https://www.nvidia.com/en-us/ai/ -supported_model_types: - - llm -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter full model name - zh_Hans: 输入模型全称 - credential_form_schemas: - - variable: endpoint_url - label: - zh_Hans: API endpoint URL - en_US: API endpoint URL - type: text-input - required: true - placeholder: - zh_Hans: Base URL, e.g. http://192.168.1.100:8000/v1 - en_US: Base URL, e.g.
http://192.168.1.100:8000/v1 - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion mode - type: select - required: false - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens_to_sample - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - show_on: - - variable: __model_type - value: llm - default: '4096' - type: text-input diff --git a/api/core/model_runtime/model_providers/oci/__init__.py b/api/core/model_runtime/model_providers/oci/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/oci/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/oci/_assets/icon_l_en.svg deleted file mode 100644 index 0981dfcff2..0000000000 --- a/api/core/model_runtime/model_providers/oci/_assets/icon_l_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/oci/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/oci/_assets/icon_s_en.svg deleted file mode 100644 index 0981dfcff2..0000000000 --- a/api/core/model_runtime/model_providers/oci/_assets/icon_s_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-16k.yaml b/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-16k.yaml deleted file mode 100644 index eb60cbcd90..0000000000 --- a/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-16k.yaml +++ /dev/null @@ -1,52 +0,0 @@ -model: cohere.command-r-16k -label: - en_US: cohere.command-r-16k v1.2 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 1 - max: 1.0 - - name: topP - use_template: top_p - default: 0.75 - min: 0 - max: 1 - - name: topK - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - default: 0 - min: 0 - max: 500 - - name: presencePenalty - use_template: presence_penalty - min: 0 - max: 1 - default: 0 - - name: frequencyPenalty - use_template: frequency_penalty - min: 0 - max: 1 - default: 0 - - name: maxTokens - use_template: max_tokens - default: 600 - max: 4000 -pricing: - input: '0.004' - output: '0.004' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-plus.yaml b/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-plus.yaml deleted file mode 100644 index df31b0d0df..0000000000 --- a/api/core/model_runtime/model_providers/oci/llm/cohere.command-r-plus.yaml +++ /dev/null @@ -1,52 +0,0 @@ -model: cohere.command-r-plus -label: - en_US: cohere.command-r-plus v1.2 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - default: 1 - max: 1.0 - - name: topP - use_template: top_p - default: 0.75 - min: 0 - max: 1 - - name: topK - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presencePenalty - use_template: presence_penalty - min: 0 - max: 1 - default: 0 - - name: frequencyPenalty - use_template: frequency_penalty - min: 0 - max: 1 - default: 0 - - name: maxTokens - use_template: max_tokens - default: 600 - max: 4000 -pricing: - input: '0.0219' - output: '0.0219' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/llm/llm.py b/api/core/model_runtime/model_providers/oci/llm/llm.py deleted file mode 100644 index 1e1fc5b3ea..0000000000 --- a/api/core/model_runtime/model_providers/oci/llm/llm.py +++ /dev/null @@ -1,469 +0,0 @@ -import base64 -import copy -import json -import logging -from collections.abc import Generator -from typing import Optional, Union - -import oci -from oci.generative_ai_inference.models.base_chat_response import BaseChatResponse - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - -request_template = { - "compartmentId": "", - "servingMode": {"modelId": "cohere.command-r-plus", "servingType": "ON_DEMAND"}, - "chatRequest": { - "apiFormat": "COHERE", - # "preambleOverride": "You are a helpful assistant.", - # "message": "Hello!", - # "chatHistory": [], - "maxTokens": 600, - "isStream": False, - "frequencyPenalty": 0, - "presencePenalty": 0, - "temperature": 1, - "topP": 0.75, - }, -} -oci_config_template = { - "user": "", - "fingerprint": "", - "tenancy": "", - "region": "", - "compartment_id": "", - "key_content": "", -} - - -class OCILargeLanguageModel(LargeLanguageModel): - # 
https://docs.oracle.com/en-us/iaas/Content/generative-ai/pretrained-models.htm - _supported_models = { - "meta.llama-3-70b-instruct": { - "system": True, - "multimodal": False, - "tool_call": False, - "stream_tool_call": False, - }, - "cohere.command-r-16k": { - "system": True, - "multimodal": False, - "tool_call": True, - "stream_tool_call": False, - }, - "cohere.command-r-plus": { - "system": True, - "multimodal": False, - "tool_call": True, - "stream_tool_call": False, - }, - } - - def _is_tool_call_supported(self, model_id: str, stream: bool = False) -> bool: - feature = self._supported_models.get(model_id) - if not feature: - return False - return feature["stream_tool_call"] if stream else feature["tool_call"] - - def _is_multimodal_supported(self, model_id: str) -> bool: - feature = self._supported_models.get(model_id) - if not feature: - return False - return feature["multimodal"] - - def _is_system_prompt_supported(self, model_id: str) -> bool: - feature = self._supported_models.get(model_id) - if not feature: - return False - return feature["system"] - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # print("model"+"*"*20) - # print(model) - # print("credentials"+"*"*20) - # print(credentials) - # print("model_parameters"+"*"*20) - # print(model_parameters) - # print("prompt_messages"+"*"*200) - # print(prompt_messages) - # print("tools"+"*"*20) - # print(tools) - - # invoke model - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: number of tokens - """ - prompt = self._convert_messages_to_prompt(prompt_messages) - - return self._get_num_tokens_by_gpt2(prompt) - - def get_num_characters( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of characters for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: number of characters - """ - prompt = self._convert_messages_to_prompt(prompt_messages) - - return len(prompt) - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags.
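[Editor's note, not part of the diff: given the tags defined in _convert_one_message_to_text below, [UserPromptMessage(content="Hi"), AssistantPromptMessage(content="Hello!")] flattens to "\n\nuser: Hi\n\nmodel: Hello!", with trailing whitespace stripped by the rstrip.]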
- """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - return text.rstrip() - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - # Setup basic variables - # Auth Config - try: - ping_message = SystemPromptMessage(content="ping") - self._generate(model, credentials, [ping_message], {"maxTokens": 5}) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # config_kwargs = model_parameters.copy() - # config_kwargs['max_output_tokens'] = config_kwargs.pop('max_tokens_to_sample', None) - # if stop: - # config_kwargs["stop_sequences"] = stop - - # initialize client - # ref: https://docs.oracle.com/en-us/iaas/api/#/en/generative-ai-inference/20231130/ChatResult/Chat - oci_config = copy.deepcopy(oci_config_template) - if "oci_config_content" in credentials: - oci_config_content = base64.b64decode(credentials.get("oci_config_content")).decode("utf-8") - config_items = oci_config_content.split("/") - if len(config_items) != 5: - raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode(" - "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" - ) - oci_config["user"] = config_items[0] - oci_config["fingerprint"] = config_items[1] - oci_config["tenancy"] = config_items[2] - oci_config["region"] = config_items[3] - oci_config["compartment_id"] = config_items[4] - else: - raise CredentialsValidateFailedError("need to set oci_config_content in credentials ") - if "oci_key_content" in credentials: - oci_key_content = base64.b64decode(credentials.get("oci_key_content")).decode("utf-8") - oci_config["key_content"] = oci_key_content.encode(encoding="utf-8") - else: - raise CredentialsValidateFailedError("need to set oci_config_content in credentials ") - - # oci_config = oci.config.from_file('~/.oci/config', credentials.get('oci_api_profile')) - compartment_id = oci_config["compartment_id"] - client = oci.generative_ai_inference.GenerativeAiInferenceClient(config=oci_config) - # call embedding model - request_args = copy.deepcopy(request_template) - request_args["compartmentId"] = compartment_id - request_args["servingMode"]["modelId"] = model - - chat_history = [] - system_prompts = [] - # if "meta.llama" in model: - # request_args["chatRequest"]["apiFormat"] = "GENERIC" - request_args["chatRequest"]["maxTokens"] = model_parameters.pop("maxTokens", 600) - request_args["chatRequest"].update(model_parameters) - frequency_penalty = model_parameters.get("frequencyPenalty", 0) - presence_penalty = model_parameters.get("presencePenalty", 0) - if frequency_penalty > 0 and presence_penalty > 0: - raise InvokeBadRequestError("Cannot set both 
frequency penalty and presence penalty") - - # for msg in prompt_messages: # makes message roles strictly alternating - # content = self._format_message_to_glm_content(msg) - # if history and history[-1]["role"] == content["role"]: - # history[-1]["parts"].extend(content["parts"]) - # else: - # history.append(content) - - # temporary not implement the tool call function - valid_value = self._is_tool_call_supported(model, stream) - if tools is not None and len(tools) > 0: - if not valid_value: - raise InvokeBadRequestError("Does not support function calling") - if model.startswith("cohere"): - # print("run cohere " * 10) - for message in prompt_messages[:-1]: - text = "" - if isinstance(message.content, str): - text = message.content - if isinstance(message, UserPromptMessage): - chat_history.append({"role": "USER", "message": text}) - else: - chat_history.append({"role": "CHATBOT", "message": text}) - if isinstance(message, SystemPromptMessage): - if isinstance(message.content, str): - system_prompts.append(message.content) - args = { - "apiFormat": "COHERE", - "preambleOverride": " ".join(system_prompts), - "message": prompt_messages[-1].content, - "chatHistory": chat_history, - } - request_args["chatRequest"].update(args) - elif model.startswith("meta"): - # print("run meta " * 10) - meta_messages = [] - for message in prompt_messages: - text = message.content - meta_messages.append({"role": message.role.name, "content": [{"type": "TEXT", "text": text}]}) - args = {"apiFormat": "GENERIC", "messages": meta_messages, "numGenerations": 1, "topK": -1} - request_args["chatRequest"].update(args) - - if stream: - request_args["chatRequest"]["isStream"] = True - # print("final request" + "|" * 20) - # print(request_args) - response = client.chat(request_args) - # print(vars(response)) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: BaseChatResponse, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=response.data.chat_response.text) - - # calculate num tokens - prompt_tokens = self.get_num_characters(model, credentials, prompt_messages) - completion_tokens = self.get_num_characters(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: BaseChatResponse, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - index = -1 - events = response.data.events() - for stream in events: - chunk = json.loads(stream.data) - # print(chunk) - # chunk: {'apiFormat': 'COHERE', 'text': 'Hello'} - - # for 
chunk in response: - # for part in chunk.parts: - # if part.function_call: - # assistant_prompt_message.tool_calls = [ - # AssistantPromptMessage.ToolCall( - # id=part.function_call.name, - # type='function', - # function=AssistantPromptMessage.ToolCall.ToolCallFunction( - # name=part.function_call.name, - # arguments=json.dumps(dict(part.function_call.args.items())) - # ) - # ) - # ] - - if "finishReason" not in chunk: - assistant_prompt_message = AssistantPromptMessage(content="") - if model.startswith("cohere"): - if chunk["text"]: - assistant_prompt_message.content += chunk["text"] - elif model.startswith("meta"): - assistant_prompt_message.content += chunk["message"]["content"][0]["text"] - index += 1 - # transform assistant message to prompt message - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message), - ) - else: - # calculate num tokens - prompt_tokens = self.get_num_characters(model, credentials, prompt_messages) - completion_tokens = self.get_num_characters(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - finish_reason=str(chunk["finishReason"]), - usage=usage, - ), - ) - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. - """ - human_prompt = "\n\nuser:" - ai_prompt = "\n\nmodel:" - - content = message.content - if isinstance(content, list): - content = "".join(c.data for c in content if c.type != PromptMessageContentType.IMAGE) - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage | ToolPromptMessage): - message_text = f"{human_prompt} {content}" - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [], - InvokeRateLimitError: [], - InvokeAuthorizationError: [], - InvokeBadRequestError: [], - } diff --git a/api/core/model_runtime/model_providers/oci/llm/meta.llama-3-70b-instruct.yaml b/api/core/model_runtime/model_providers/oci/llm/meta.llama-3-70b-instruct.yaml deleted file mode 100644 index dd5be107c0..0000000000 --- a/api/core/model_runtime/model_providers/oci/llm/meta.llama-3-70b-instruct.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: meta.llama-3-70b-instruct -label: - zh_Hans: meta.llama-3-70b-instruct - en_US: meta.llama-3-70b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - default: 1 - max: 2.0 - - name: topP - use_template: top_p - default: 0.75 - min: 0 - max: 1 - - name: topK - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presencePenalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 - - name: frequencyPenalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: maxTokens - use_template: max_tokens - default: 600 - max: 8000 -pricing: - input: '0.015' - output: '0.015' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/oci.py b/api/core/model_runtime/model_providers/oci/oci.py deleted file mode 100644 index e182d2d043..0000000000 --- a/api/core/model_runtime/model_providers/oci/oci.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class OCIGENAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `cohere.command-r-plus` model for validate, - model_instance.validate_credentials(model="cohere.command-r-plus", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/oci/oci.yaml b/api/core/model_runtime/model_providers/oci/oci.yaml deleted file mode 100644 index f2f23e18f1..0000000000 --- a/api/core/model_runtime/model_providers/oci/oci.yaml +++ /dev/null @@ -1,42 +0,0 @@ -provider: oci -label: - en_US: OCIGenerativeAI -description: - en_US: Models provided by OCI, such as Cohere Command R and Cohere Command R+. 
- zh_Hans: OCI 提供的模型,例如 Cohere Command R 和 Cohere Command R+。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FFFFFF" -help: - title: - en_US: Get your API Key from OCI - zh_Hans: 从 OCI 获取 API Key - url: - en_US: https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm -supported_model_types: - - llm - - text-embedding - #- rerank -configurate_methods: - - predefined-model - #- customizable-model -provider_credential_schema: - credential_form_schemas: - - variable: oci_config_content - label: - en_US: oci api key config file's content - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的 oci api key config 文件的内容(base64.b64encode("user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid".encode('utf-8')) ) - en_US: Enter your oci api key config file's content(base64.b64encode("user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid".encode('utf-8')) ) - - variable: oci_key_content - label: - en_US: oci api key file's content - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的 oci api key 文件的内容(base64.b64encode("pem file content".encode('utf-8'))) - en_US: Enter your oci api key file's content(base64.b64encode("pem file content".encode('utf-8'))) diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/__init__.py b/api/core/model_runtime/model_providers/oci/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/_position.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/_position.yaml deleted file mode 100644 index 149f1e3797..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/_position.yaml +++ /dev/null @@ -1,5 +0,0 @@ -- cohere.embed-english-light-v2.0 -- cohere.embed-english-light-v3.0 -- cohere.embed-english-v3.0 -- cohere.embed-multilingual-light-v3.0 -- cohere.embed-multilingual-v3.0 diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v2.0.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v2.0.yaml deleted file mode 100644 index 259d5b45b7..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v2.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: cohere.embed-english-light-v2.0 -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 48 -pricing: - input: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v3.0.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v3.0.yaml deleted file mode 100644 index 065e7474c0..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-light-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: cohere.embed-english-light-v3.0 -model_type: text-embedding -model_properties: - context_size: 384 - max_chunks: 48 -pricing: - input: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-v3.0.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-v3.0.yaml deleted file mode 100644 index 3e2deea16a..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-english-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: cohere.embed-english-v3.0 -model_type: text-embedding -model_properties: 
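[Editor's note, not part of the diff: a minimal sketch of how the two OCI credential fields above were expected to be produced, matching the base64 parsing in oci/llm/llm.py earlier in this diff; every OCID, the fingerprint, and the key filename below are hypothetical placeholders.]
import base64

# user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid -- the provider splits this on "/" into exactly five items
fields = "ocid1.user.oc1..aaa/12:34:56:78:9a:bc/ocid1.tenancy.oc1..bbb/us-ashburn-1/ocid1.compartment.oc1..ccc"
oci_config_content = base64.b64encode(fields.encode("utf-8")).decode("utf-8")

# oci_key_content is the base64 of the PEM API signing key that matches the fingerprint
with open("oci_api_key.pem", "rb") as f:
    oci_key_content = base64.b64encode(f.read()).decode("utf-8")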
- context_size: 1024 - max_chunks: 48 -pricing: - input: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-light-v3.0.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-light-v3.0.yaml deleted file mode 100644 index 0d2b892c64..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-light-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: cohere.embed-multilingual-light-v3.0 -model_type: text-embedding -model_properties: - context_size: 384 - max_chunks: 48 -pricing: - input: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-v3.0.yaml b/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-v3.0.yaml deleted file mode 100644 index 9ebe260b32..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/cohere.embed-multilingual-v3.0.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: cohere.embed-multilingual-v3.0 -model_type: text-embedding -model_properties: - context_size: 1024 - max_chunks: 48 -pricing: - input: '0.001' - unit: '0.0001' - currency: USD diff --git a/api/core/model_runtime/model_providers/ollama/__init__.py b/api/core/model_runtime/model_providers/ollama/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/ollama/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/ollama/_assets/icon_l_en.svg deleted file mode 100644 index 39d8a1ece6..0000000000 --- a/api/core/model_runtime/model_providers/ollama/_assets/icon_l_en.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/ollama/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/ollama/_assets/icon_s_en.svg deleted file mode 100644 index f8482a96b9..0000000000 --- a/api/core/model_runtime/model_providers/ollama/_assets/icon_s_en.svg +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/ollama/llm/__init__.py b/api/core/model_runtime/model_providers/ollama/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py deleted file mode 100644 index a7ea53e0e9..0000000000 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ /dev/null @@ -1,726 +0,0 @@ -import json -import logging -import re -from collections.abc import Generator -from decimal import Decimal -from typing import Optional, Union, cast -from urllib.parse import urljoin - -import requests - -from core.model_runtime.entities.llm_entities import ( - LLMMode, - LLMResult, - LLMResultChunk, - LLMResultChunkDelta, -) -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - I18nObject, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, - PriceConfig, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - 
InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import ( - LargeLanguageModel, -) - -logger = logging.getLogger(__name__) - - -class OllamaLargeLanguageModel(LargeLanguageModel): - """ - Model class for Ollama large language model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # get model mode - model_mode = self.get_model_mode(model, credentials) - - if model_mode == LLMMode.CHAT: - # chat model - return self._num_tokens_from_messages(prompt_messages) - else: - first_prompt_message = prompt_messages[0] - if isinstance(first_prompt_message.content, str): - text = first_prompt_message.content - else: - text = "" - for message_content in first_prompt_message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - text = message_content.data - break - return self._get_num_tokens_by_gpt2(text) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._generate( - model=model, - credentials=credentials, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={"num_predict": 5}, - stream=False, - ) - except InvokeError as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials validation: {ex.description}") - except Exception as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials validation: {str(ex)}") - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm completion model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - headers = {"Content-Type": 
"application/json"} - - endpoint_url = credentials["base_url"] - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - # prepare the payload for a simple ping to the model - data = {"model": model, "stream": stream} - - if "format" in model_parameters: - data["format"] = model_parameters["format"] - del model_parameters["format"] - - if "keep_alive" in model_parameters: - data["keep_alive"] = model_parameters["keep_alive"] - del model_parameters["keep_alive"] - - data["options"] = model_parameters or {} - - if stop: - data["options"]["stop"] = stop - - completion_type = LLMMode.value_of(credentials["mode"]) - - if completion_type is LLMMode.CHAT: - endpoint_url = urljoin(endpoint_url, "api/chat") - data["messages"] = [self._convert_prompt_message_to_dict(m) for m in prompt_messages] - else: - endpoint_url = urljoin(endpoint_url, "api/generate") - first_prompt_message = prompt_messages[0] - if isinstance(first_prompt_message, UserPromptMessage): - first_prompt_message = cast(UserPromptMessage, first_prompt_message) - if isinstance(first_prompt_message.content, str): - data["prompt"] = first_prompt_message.content - else: - text = "" - images = [] - for message_content in first_prompt_message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - text = message_content.data - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - image_data = re.sub( - r"^data:image\/[a-zA-Z]+;base64,", - "", - message_content.data, - ) - images.append(image_data) - - data["prompt"] = text - data["images"] = images - - # send a post request to validate the credentials - response = requests.post(endpoint_url, headers=headers, json=data, timeout=(10, 300), stream=stream) - - response.encoding = "utf-8" - if response.status_code != 200: - raise InvokeError(f"API request failed with status code {response.status_code}: {response.text}") - - if stream: - return self._handle_generate_stream_response(model, credentials, completion_type, response, prompt_messages) - - return self._handle_generate_response(model, credentials, completion_type, response, prompt_messages) - - def _handle_generate_response( - self, - model: str, - credentials: dict, - completion_type: LLMMode, - response: requests.Response, - prompt_messages: list[PromptMessage], - ) -> LLMResult: - """ - Handle llm completion response - - :param model: model name - :param credentials: model credentials - :param completion_type: completion type - :param response: response - :param prompt_messages: prompt messages - :return: llm result - """ - response_json = response.json() - - if completion_type is LLMMode.CHAT: - message = response_json.get("message", {}) - response_content = message.get("content", "") - else: - response_content = response_json["response"] - - assistant_message = AssistantPromptMessage(content=response_content) - - if "prompt_eval_count" in response_json and "eval_count" in response_json: - # transform usage - prompt_tokens = response_json["prompt_eval_count"] - completion_tokens = response_json["eval_count"] - else: - # calculate num tokens - prompt_tokens = self._get_num_tokens_by_gpt2(prompt_messages[0].content) - completion_tokens = self._get_num_tokens_by_gpt2(assistant_message.content) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - 
model=response_json["model"], - prompt_messages=prompt_messages, - message=assistant_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, - model: str, - credentials: dict, - completion_type: LLMMode, - response: requests.Response, - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm completion stream response - - :param model: model name - :param credentials: model credentials - :param completion_type: completion type - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - full_text = "" - chunk_index = 0 - - def create_final_llm_result_chunk( - index: int, message: AssistantPromptMessage, finish_reason: str - ) -> LLMResultChunk: - # calculate num tokens - prompt_tokens = self._get_num_tokens_by_gpt2(prompt_messages[0].content) - completion_tokens = self._get_num_tokens_by_gpt2(full_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - return LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=message, - finish_reason=finish_reason, - usage=usage, - ), - ) - - for chunk in response.iter_lines(decode_unicode=True, delimiter="\n"): - if not chunk: - continue - - try: - chunk_json = json.loads(chunk) - # stream ended - except json.JSONDecodeError as e: - yield create_final_llm_result_chunk( - index=chunk_index, - message=AssistantPromptMessage(content=""), - finish_reason="Non-JSON encountered.", - ) - - chunk_index += 1 - break - - if completion_type is LLMMode.CHAT: - if not chunk_json: - continue - - if "message" not in chunk_json: - text = "" - else: - text = chunk_json.get("message").get("content", "") - else: - if not chunk_json: - continue - - # transform assistant message to prompt message - text = chunk_json["response"] - - assistant_prompt_message = AssistantPromptMessage(content=text) - - full_text += text - - if chunk_json["done"]: - # calculate num tokens - if "prompt_eval_count" in chunk_json: - prompt_tokens = chunk_json["prompt_eval_count"] - else: - prompt_message_content = prompt_messages[0].content - if isinstance(prompt_message_content, str): - prompt_tokens = self._get_num_tokens_by_gpt2(prompt_message_content) - else: - content_text = "" - for message_content in prompt_message_content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - content_text += message_content.data - prompt_tokens = self._get_num_tokens_by_gpt2(content_text) - - completion_tokens = chunk_json.get("eval_count", self._get_num_tokens_by_gpt2(full_text)) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=chunk_json["model"], - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=assistant_prompt_message, - finish_reason="stop", - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=chunk_json["model"], - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=assistant_prompt_message, - ), - ) - - chunk_index += 1 - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for Ollama API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - 
message_dict = {"role": "user", "content": message.content} - else: - text = "" - images = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - text = message_content.data - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data) - images.append(image_data) - - message_dict = {"role": "user", "content": text, "images": images} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - else: - raise ValueError(f"Got unknown type {message}") - - return message_dict - - def _num_tokens_from_messages(self, messages: list[PromptMessage]) -> int: - """ - Calculate num tokens. - - :param messages: messages - """ - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - for key, value in message.items(): - num_tokens += self._get_num_tokens_by_gpt2(str(key)) - num_tokens += self._get_num_tokens_by_gpt2(str(value)) - - return num_tokens - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - Get customizable model schema. - - :param model: model name - :param credentials: credentials - - :return: model schema - """ - extras = {} - - if "vision_support" in credentials and credentials["vision_support"] == "true": - extras["features"] = [ModelFeature.VISION] - - entity = AIModelEntity( - model=model, - label=I18nObject(zh_Hans=model, en_US=model), - model_type=ModelType.LLM, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.MODE: credentials.get("mode"), - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 4096)), - }, - parameter_rules=[ - ParameterRule( - name=DefaultParameterName.TEMPERATURE.value, - use_template=DefaultParameterName.TEMPERATURE.value, - label=I18nObject(en_US="Temperature", zh_Hans="温度"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="The temperature of the model. " - "Increasing the temperature will make the model answer " - "more creatively. (Default: 0.8)", - zh_Hans="模型的温度。增加温度将使模型的回答更具创造性。(默认值:0.8)", - ), - default=0.1, - min=0, - max=1, - ), - ParameterRule( - name=DefaultParameterName.TOP_P.value, - use_template=DefaultParameterName.TOP_P.value, - label=I18nObject(en_US="Top P", zh_Hans="Top P"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="Works together with top-k. A higher value (e.g., 0.95) will lead to " - "more diverse text, while a lower value (e.g., 0.5) will generate more " - "focused and conservative text. (Default: 0.9)", - zh_Hans="与top-k一起工作。较高的值(例如,0.95)会导致生成更多样化的文本,而较低的值(例如,0.5)会生成更专注和保守的文本。(默认值:0.9)", - ), - default=0.9, - min=0, - max=1, - ), - ParameterRule( - name="top_k", - label=I18nObject(en_US="Top K", zh_Hans="Top K"), - type=ParameterType.INT, - help=I18nObject( - en_US="Reduces the probability of generating nonsense. " - "A higher value (e.g. 100) will give more diverse answers, " - "while a lower value (e.g. 10) will be more conservative. 
(Default: 40)", - zh_Hans="减少生成无意义内容的可能性。较高的值(例如100)将提供更多样化的答案,而较低的值(例如10)将更为保守。(默认值:40)", - ), - min=1, - max=100, - ), - ParameterRule( - name="repeat_penalty", - label=I18nObject(en_US="Repeat Penalty"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="Sets how strongly to penalize repetitions. " - "A higher value (e.g., 1.5) will penalize repetitions more strongly, " - "while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)", - zh_Hans="设置对重复内容的惩罚强度。一个较高的值(例如,1.5)会更强地惩罚重复内容,而一个较低的值(例如,0.9)则会相对宽容。(默认值:1.1)", - ), - min=-2, - max=2, - ), - ParameterRule( - name="num_predict", - use_template="max_tokens", - label=I18nObject(en_US="Num Predict", zh_Hans="最大令牌数预测"), - type=ParameterType.INT, - help=I18nObject( - en_US="Maximum number of tokens to predict when generating text. " - "(Default: 128, -1 = infinite generation, -2 = fill context)", - zh_Hans="生成文本时预测的最大令牌数。(默认值:128,-1 = 无限生成,-2 = 填充上下文)", - ), - default=(512 if int(credentials.get("max_tokens", 4096)) >= 768 else 128), - min=-2, - max=int(credentials.get("max_tokens", 4096)), - ), - ParameterRule( - name="mirostat", - label=I18nObject(en_US="Mirostat sampling", zh_Hans="Mirostat 采样"), - type=ParameterType.INT, - help=I18nObject( - en_US="Enable Mirostat sampling for controlling perplexity. " - "(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", - zh_Hans="启用 Mirostat 采样以控制困惑度。" - "(默认值:0,0 = 禁用,1 = Mirostat,2 = Mirostat 2.0)", - ), - min=0, - max=2, - ), - ParameterRule( - name="mirostat_eta", - label=I18nObject(en_US="Mirostat Eta", zh_Hans="学习率"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="Influences how quickly the algorithm responds to feedback from " - "the generated text. A lower learning rate will result in slower adjustments, " - "while a higher learning rate will make the algorithm more responsive. " - "(Default: 0.1)", - zh_Hans="影响算法对生成文本反馈响应的速度。较低的学习率会导致调整速度变慢,而较高的学习率会使得算法更加灵敏。(默认值:0.1)", - ), - precision=1, - ), - ParameterRule( - name="mirostat_tau", - label=I18nObject(en_US="Mirostat Tau", zh_Hans="文本连贯度"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="Controls the balance between coherence and diversity of the output. " - "A lower value will result in more focused and coherent text. (Default: 5.0)", - zh_Hans="控制输出的连贯性和多样性之间的平衡。较低的值会导致更专注和连贯的文本。(默认值:5.0)", - ), - precision=1, - ), - ParameterRule( - name="num_ctx", - label=I18nObject(en_US="Size of context window", zh_Hans="上下文窗口大小"), - type=ParameterType.INT, - help=I18nObject( - en_US="Sets the size of the context window used to generate the next token. (Default: 2048)", - zh_Hans="设置用于生成下一个标记的上下文窗口大小。(默认值:2048)", - ), - default=2048, - min=1, - ), - ParameterRule( - name="num_gpu", - label=I18nObject(en_US="GPU Layers", zh_Hans="GPU 层数"), - type=ParameterType.INT, - help=I18nObject( - en_US="The number of layers to offload to the GPU(s). " - "On macOS it defaults to 1 to enable metal support, 0 to disable." - "As long as a model fits into one gpu it stays in one. " - "It does not set the number of GPU(s). ", - zh_Hans="加载到 GPU 的层数。在 macOS 上,默认为 1 以启用 Metal 支持,设置为 0 则禁用。" - "只要模型适合一个 GPU,它就保留在其中。它不设置 GPU 的数量。", - ), - min=-1, - default=1, - ), - ParameterRule( - name="num_thread", - label=I18nObject(en_US="Num Thread", zh_Hans="线程数"), - type=ParameterType.INT, - help=I18nObject( - en_US="Sets the number of threads to use during computation. " - "By default, Ollama will detect this for optimal performance. 
" - "It is recommended to set this value to the number of physical CPU cores " - "your system has (as opposed to the logical number of cores).", - zh_Hans="设置计算过程中使用的线程数。默认情况下,Ollama会检测以获得最佳性能。建议将此值设置为系统拥有的物理CPU核心数(而不是逻辑核心数)。", - ), - min=1, - ), - ParameterRule( - name="repeat_last_n", - label=I18nObject(en_US="Repeat last N", zh_Hans="回溯内容"), - type=ParameterType.INT, - help=I18nObject( - en_US="Sets how far back for the model to look back to prevent repetition. " - "(Default: 64, 0 = disabled, -1 = num_ctx)", - zh_Hans="设置模型回溯多远的内容以防止重复。(默认值:64,0 = 禁用,-1 = num_ctx)", - ), - min=-1, - ), - ParameterRule( - name="tfs_z", - label=I18nObject(en_US="TFS Z", zh_Hans="减少标记影响"), - type=ParameterType.FLOAT, - help=I18nObject( - en_US="Tail free sampling is used to reduce the impact of less probable tokens " - "from the output. A higher value (e.g., 2.0) will reduce the impact more, " - "while a value of 1.0 disables this setting. (default: 1)", - zh_Hans="用于减少输出中不太可能的标记的影响。较高的值(例如,2.0)会更多地减少这种影响,而1.0的值则会禁用此设置。(默认值:1)", - ), - precision=1, - ), - ParameterRule( - name="seed", - label=I18nObject(en_US="Seed", zh_Hans="随机数种子"), - type=ParameterType.INT, - help=I18nObject( - en_US="Sets the random number seed to use for generation. Setting this to " - "a specific number will make the model generate the same text for " - "the same prompt. (Default: 0)", - zh_Hans="设置用于生成的随机数种子。将此设置为特定数字将使模型对相同的提示生成相同的文本。(默认值:0)", - ), - ), - ParameterRule( - name="keep_alive", - label=I18nObject(en_US="Keep Alive", zh_Hans="模型存活时间"), - type=ParameterType.STRING, - help=I18nObject( - en_US="Sets how long the model is kept in memory after generating a response. " - "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)." - " A negative number keeps the model loaded indefinitely, and '0' unloads the model" - " immediately after generating a response." - " Valid time units are 's','m','h'. (Default: 5m)", - zh_Hans="设置模型在生成响应后在内存中保留的时间。" - "这必须是一个带有单位的持续时间字符串(例如,'10m' 表示10分钟,'24h' 表示24小时)。" - "负数表示无限期地保留模型,'0'表示在生成响应后立即卸载模型。" - "有效的时间单位有 's'(秒)、'm'(分钟)、'h'(小时)。(默认值:5m)", - ), - ), - ParameterRule( - name="format", - label=I18nObject(en_US="Format", zh_Hans="返回格式"), - type=ParameterType.STRING, - help=I18nObject( - en_US="the format to return a response in. Currently the only accepted value is json.", - zh_Hans="返回响应的格式。目前唯一接受的值是json。", - ), - options=["json"], - ), - ], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - output=Decimal(credentials.get("output_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - **extras, - ) - - return entity - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeAuthorizationError: [ - requests.exceptions.InvalidHeader, # Missing or Invalid API Key - ], - InvokeBadRequestError: [ - requests.exceptions.HTTPError, # Invalid Endpoint URL or model name - requests.exceptions.InvalidURL, # Misconfigured request or other API error - ], - InvokeRateLimitError: [ - requests.exceptions.RetryError # Too many requests sent in a short period of time - ], - InvokeServerUnavailableError: [ - requests.exceptions.ConnectionError, # Engine Overloaded - requests.exceptions.HTTPError, # Server Error - ], - InvokeConnectionError: [ - requests.exceptions.ConnectTimeout, # Timeout - requests.exceptions.ReadTimeout, # Timeout - ], - } diff --git a/api/core/model_runtime/model_providers/ollama/ollama.py b/api/core/model_runtime/model_providers/ollama/ollama.py deleted file mode 100644 index 115280193a..0000000000 --- a/api/core/model_runtime/model_providers/ollama/ollama.py +++ /dev/null @@ -1,16 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class OllamaProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validation fails, raise an exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - pass diff --git a/api/core/model_runtime/model_providers/ollama/ollama.yaml b/api/core/model_runtime/model_providers/ollama/ollama.yaml deleted file mode 100644 index 33747753bd..0000000000 --- a/api/core/model_runtime/model_providers/ollama/ollama.yaml +++ /dev/null @@ -1,98 +0,0 @@ -provider: ollama -label: - en_US: Ollama -icon_large: - en_US: icon_l_en.svg -icon_small: - en_US: icon_s_en.svg -background: "#F9FAFB" -help: - title: - en_US: How to integrate with Ollama - zh_Hans: 如何集成 Ollama - url: - en_US: https://docs.dify.ai/tutorials/model-configuration/ollama -supported_model_types: - - llm - - text-embedding -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: base_url - label: - zh_Hans: 基础 URL - en_US: Base URL - type: text-input - required: true - placeholder: - zh_Hans: Ollama server 的基础 URL,例如 http://192.168.1.100:11434 - en_US: Base URL of Ollama server, e.g.
http://192.168.1.100:11434 - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - zh_Hans: 模型类型 - en_US: Completion mode - type: select - required: true - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - show_on: - - variable: __model_type - value: llm - default: '4096' - type: text-input - required: true - - variable: vision_support - label: - zh_Hans: 是否支持 Vision - en_US: Vision support - show_on: - - variable: __model_type - value: llm - default: 'false' - type: radio - required: false - options: - - value: 'true' - label: - en_US: 'Yes' - zh_Hans: 是 - - value: 'false' - label: - en_US: 'No' - zh_Hans: 否 diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/__init__.py b/api/core/model_runtime/model_providers/ollama/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/__init__.py b/api/core/model_runtime/model_providers/openai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/openai/_assets/icon_l_en.svg deleted file mode 100644 index dae73f58d7..0000000000 --- a/api/core/model_runtime/model_providers/openai/_assets/icon_l_en.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/openai/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/openai/_assets/icon_s_en.svg deleted file mode 100644 index 70686f9b3b..0000000000 --- a/api/core/model_runtime/model_providers/openai/_assets/icon_s_en.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/api/core/model_runtime/model_providers/openai/_common.py b/api/core/model_runtime/model_providers/openai/_common.py deleted file mode 100644 index 2181bb4f08..0000000000 --- a/api/core/model_runtime/model_providers/openai/_common.py +++ /dev/null @@ -1,60 +0,0 @@ -from collections.abc import Mapping - -import openai -from httpx import Timeout - -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonOpenAI: - def _to_credential_kwargs(self, credentials: Mapping) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "api_key": credentials["openai_api_key"], - "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), - "max_retries": 1, - } - - if credentials.get("openai_api_base"): - openai_api_base = credentials["openai_api_base"].rstrip("/") - credentials_kwargs["base_url"] = openai_api_base + "/v1" - - if "openai_organization" in credentials: - credentials_kwargs["organization"] = credentials["openai_organization"] - - return credentials_kwargs - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error 
type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], - InvokeServerUnavailableError: [openai.InternalServerError], - InvokeRateLimitError: [openai.RateLimitError], - InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], - InvokeBadRequestError: [ - openai.BadRequestError, - openai.NotFoundError, - openai.UnprocessableEntityError, - openai.APIError, - ], - } diff --git a/api/core/model_runtime/model_providers/openai/llm/__init__.py b/api/core/model_runtime/model_providers/openai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml b/api/core/model_runtime/model_providers/openai/llm/_position.yaml deleted file mode 100644 index 7501bc1164..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml +++ /dev/null @@ -1,26 +0,0 @@ -- gpt-4 -- gpt-4o -- gpt-4o-2024-05-13 -- gpt-4o-2024-08-06 -- chatgpt-4o-latest -- gpt-4o-mini -- gpt-4o-mini-2024-07-18 -- o1-preview -- o1-preview-2024-09-12 -- o1-mini -- o1-mini-2024-09-12 -- gpt-4-turbo -- gpt-4-turbo-2024-04-09 -- gpt-4-turbo-preview -- gpt-4-32k -- gpt-4-1106-preview -- gpt-4-0125-preview -- gpt-4-vision-preview -- gpt-3.5-turbo -- gpt-3.5-turbo-16k -- gpt-3.5-turbo-16k-0613 -- gpt-3.5-turbo-0125 -- gpt-3.5-turbo-1106 -- gpt-3.5-turbo-0613 -- gpt-3.5-turbo-instruct -- text-davinci-003 diff --git a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml deleted file mode 100644 index b47449a49a..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: chatgpt-4o-latest -label: - zh_Hans: chatgpt-4o-latest - en_US: chatgpt-4o-latest -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '2.50' - output: '10.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml deleted file mode 100644 index ffa725ec40..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0125.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: gpt-3.5-turbo-0125 -label: - zh_Hans: gpt-3.5-turbo-0125 - en_US: gpt-3.5-turbo-0125 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - 
name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.0005' - output: '0.0015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml deleted file mode 100644 index a1ad07d712..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-0613.yaml +++ /dev/null @@ -1,34 +0,0 @@ -model: gpt-3.5-turbo-0613 -label: - zh_Hans: gpt-3.5-turbo-0613 - en_US: gpt-3.5-turbo-0613 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '0.0015' - output: '0.002' - unit: '0.001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml deleted file mode 100644 index 21150fc3a6..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-1106.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: gpt-3.5-turbo-1106 -label: - zh_Hans: gpt-3.5-turbo-1106 - en_US: gpt-3.5-turbo-1106 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.001' - output: '0.002' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml deleted file mode 100644 index 4e30279284..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k-0613.yaml +++ /dev/null @@ -1,34 +0,0 @@ -model: gpt-3.5-turbo-16k-0613 -label: - zh_Hans: gpt-3.5-turbo-16k-0613 - en_US: gpt-3.5-turbo-16k-0613 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - 
max: 16385 - - name: response_format - use_template: response_format -pricing: - input: '0.003' - output: '0.004' - unit: '0.001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k.yaml deleted file mode 100644 index 3684c1945c..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-16k.yaml +++ /dev/null @@ -1,33 +0,0 @@ -model: gpt-3.5-turbo-16k -label: - zh_Hans: gpt-3.5-turbo-16k - en_US: gpt-3.5-turbo-16k -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16385 - - name: response_format - use_template: response_format -pricing: - input: '0.003' - output: '0.004' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-instruct.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-instruct.yaml deleted file mode 100644 index ad831539e0..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: gpt-3.5-turbo-instruct -label: - zh_Hans: gpt-3.5-turbo-instruct - en_US: gpt-3.5-turbo-instruct -model_type: llm -features: [ ] -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: '0.0015' - output: '0.002' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml deleted file mode 100644 index d3a8ee535a..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-3.5-turbo.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: gpt-3.5-turbo -label: - zh_Hans: gpt-3.5-turbo - en_US: gpt-3.5-turbo -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.0005' - output: '0.0015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml deleted file mode 100644 index ac4ec5840b..0000000000 --- 
a/api/core/model_runtime/model_providers/openai/llm/gpt-4-0125-preview.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: gpt-4-0125-preview -label: - zh_Hans: gpt-4-0125-preview - en_US: gpt-4-0125-preview -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. - required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml deleted file mode 100644 index d775239770..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-1106-preview.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: gpt-4-1106-preview -label: - zh_Hans: gpt-4-1106-preview - en_US: gpt-4-1106-preview -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
- required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml deleted file mode 100644 index 8358425e6d..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-32k.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: gpt-4-32k -label: - zh_Hans: gpt-4-32k - en_US: gpt-4-32k -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 32768 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. - required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.06' - output: '0.12' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml deleted file mode 100644 index 0234499164..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-2024-04-09.yaml +++ /dev/null @@ -1,57 +0,0 @@ -model: gpt-4-turbo-2024-04-09 -label: - zh_Hans: gpt-4-turbo-2024-04-09 - en_US: gpt-4-turbo-2024-04-09 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
- required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml deleted file mode 100644 index 8d29cf0c04..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo-preview.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: gpt-4-turbo-preview -label: - zh_Hans: gpt-4-turbo-preview - en_US: gpt-4-turbo-preview -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. - required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml deleted file mode 100644 index b25ff6a812..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-turbo.yaml +++ /dev/null @@ -1,57 +0,0 @@ -model: gpt-4-turbo -label: - zh_Hans: gpt-4-turbo - en_US: gpt-4-turbo -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
- required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml deleted file mode 100644 index 07037c6643..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4-vision-preview.yaml +++ /dev/null @@ -1,54 +0,0 @@ -model: gpt-4-vision-preview -label: - zh_Hans: gpt-4-vision-preview - en_US: gpt-4-vision-preview -model_type: llm -features: - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. - required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.01' - output: '0.03' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml deleted file mode 100644 index f7b5138b7d..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: gpt-4 -label: - zh_Hans: gpt-4 - en_US: gpt-4 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
- required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '0.03' - output: '0.06' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml deleted file mode 100644 index b630d6f630..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: gpt-4o-2024-05-13 -label: - zh_Hans: gpt-4o-2024-05-13 - en_US: gpt-4o-2024-05-13 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '5.00' - output: '15.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml deleted file mode 100644 index 73b7f69700..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: gpt-4o-2024-08-06 -label: - zh_Hans: gpt-4o-2024-08-06 - en_US: gpt-4o-2024-08-06 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - json_schema - - name: json_schema - use_template: json_schema -pricing: - input: '2.50' - output: '10.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml deleted file mode 100644 index df38270f79..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: gpt-4o-mini-2024-07-18 -label: - zh_Hans: gpt-4o-mini-2024-07-18 - en_US: gpt-4o-mini-2024-07-18 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: 
frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - json_schema - - name: json_schema - use_template: json_schema -pricing: - input: '0.15' - output: '0.60' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml deleted file mode 100644 index 5e3c94fbe2..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml +++ /dev/null @@ -1,47 +0,0 @@ -model: gpt-4o-mini -label: - zh_Hans: gpt-4o-mini - en_US: gpt-4o-mini -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object - - json_schema - - name: json_schema - use_template: json_schema -pricing: - input: '0.15' - output: '0.60' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml deleted file mode 100644 index 3090a9e090..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: gpt-4o -label: - zh_Hans: gpt-4o - en_US: gpt-4o -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '5.00' - output: '15.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py deleted file mode 100644 index d42fce528a..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ /dev/null @@ -1,1182 +0,0 @@ -import json -import logging -from collections.abc import Generator -from typing import Optional, Union, cast - -import tiktoken -from openai import OpenAI, Stream -from openai.types import Completion -from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall -from 
openai.types.chat.chat_completion_message import FunctionCall - -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, I18nObject, ModelType, PriceConfig -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.openai._common import _CommonOpenAI - -logger = logging.getLogger(__name__) - -OPENAI_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - - -{{instructions}} - -""" # noqa: E501 - - -class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): - """ - Model class for OpenAI large language model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # handle fine tune remote models - base_model = model - if model.startswith("ft:"): - base_model = model.split(":")[1] - - # get model mode - model_mode = self.get_model_mode(base_model, credentials) - - if model_mode == LLMMode.CHAT: - # chat model - return self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - else: - # text completion model - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - stop=stop, - stream=stream, - user=user, - ) - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: list[Callback] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - # handle fine tune remote models - base_model = model - if model.startswith("ft:"): - base_model = model.split(":")[1] - - # get model mode - model_mode = self.get_model_mode(base_model, credentials) - - # transform response format - if "response_format" in model_parameters and model_parameters["response_format"] in {"JSON", "XML"}: - stop = stop or [] - if 
model_mode == LLMMode.CHAT: - # chat model - self._transform_chat_json_prompts( - model=base_model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - response_format=model_parameters["response_format"], - ) - else: - self._transform_completion_json_prompts( - model=base_model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - response_format=model_parameters["response_format"], - ) - model_parameters.pop("response_format") - - return self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _transform_chat_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts - """ - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - - # check if there is a system message - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - # override the system message - prompt_messages[0] = SystemPromptMessage( - content=OPENAI_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}\n")) - else: - # insert the system message - prompt_messages.insert( - 0, - SystemPromptMessage( - content=OPENAI_BLOCK_MODE_PROMPT.replace( - "{{instructions}}", f"Please output a valid {response_format} object." 
- ).replace("{{block}}", response_format) - ), - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - - def _transform_completion_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts - """ - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - - # override the last user message - user_message = None - for i in range(len(prompt_messages) - 1, -1, -1): - if isinstance(prompt_messages[i], UserPromptMessage): - user_message = prompt_messages[i] - break - - if user_message: - if prompt_messages[i].content[-11:] == "Assistant: ": - # now we are in the chat app, remove the last assistant message - prompt_messages[i].content = prompt_messages[i].content[:-11] - prompt_messages[i] = UserPromptMessage( - content=OPENAI_BLOCK_MODE_PROMPT.replace("{{instructions}}", user_message.content).replace( - "{{block}}", response_format - ) - ) - prompt_messages[i].content += f"Assistant:\n```{response_format}\n" - else: - prompt_messages[i] = UserPromptMessage( - content=OPENAI_BLOCK_MODE_PROMPT.replace("{{instructions}}", user_message.content).replace( - "{{block}}", response_format - ) - ) - prompt_messages[i].content += f"\n```{response_format}\n" - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # handle fine tune remote models - if model.startswith("ft:"): - base_model = model.split(":")[1] - else: - base_model = model - - # get model mode - model_mode = self.get_model_mode(model) - - if model_mode == LLMMode.CHAT: - # chat model - return self._num_tokens_from_messages(base_model, prompt_messages, tools) - else: - # text completion model, do not support tool calling - return self._num_tokens_from_string(base_model, prompt_messages[0].content) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - # handle fine tune remote models - base_model = model - # fine-tuned model name likes ft:gpt-3.5-turbo-0613:personal::xxxxx - if model.startswith("ft:"): - base_model = model.split(":")[1] - - # check if model exists - remote_models = self.remote_models(credentials) - remote_model_map = {model.model: model for model in remote_models} - if model not in remote_model_map: - raise CredentialsValidateFailedError(f"Fine-tuned model {model} not found") - - # get model mode - model_mode = self.get_model_mode(base_model, credentials) - - if model_mode == LLMMode.CHAT: - # chat model - client.chat.completions.create( - messages=[{"role": "user", "content": "ping"}], - model=model, - temperature=0, - max_tokens=20, - stream=False, - ) - else: - # text completion model - client.completions.create( - prompt="ping", - model=model, - 
temperature=0, - max_tokens=20, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def remote_models(self, credentials: dict) -> list[AIModelEntity]: - """ - Return remote models if credentials are provided. - - :param credentials: provider credentials - :return: - """ - # get predefined models - predefined_models = self.predefined_models() - predefined_models_map = {model.model: model for model in predefined_models} - - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - # get all remote models - remote_models = client.models.list() - - fine_tune_models = [model for model in remote_models if model.id.startswith("ft:")] - - ai_model_entities = [] - for model in fine_tune_models: - base_model = model.id.split(":")[1] - - base_model_schema = None - for predefined_model_name, predefined_model in predefined_models_map.items(): - if predefined_model_name in base_model: - base_model_schema = predefined_model - - if not base_model_schema: - continue - - ai_model_entity = AIModelEntity( - model=model.id, - label=I18nObject(zh_Hans=model.id, en_US=model.id), - model_type=ModelType.LLM, - features=base_model_schema.features, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties=base_model_schema.model_properties, - parameter_rules=base_model_schema.parameter_rules, - pricing=PriceConfig(input=0.003, output=0.006, unit=0.001, currency="USD"), - ) - - ai_model_entities.append(ai_model_entity) - - return ai_model_entities - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm completion model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # init model client - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - if stream: - extra_model_kwargs["stream_options"] = {"include_usage": True} - - # text completion model - response = client.completions.create( - prompt=prompt_messages[0].content, model=model, stream=stream, **model_parameters, **extra_model_kwargs - ) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: Completion, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm completion response - - :param model: model name - :param credentials: model credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm result - """ - assistant_text = response.choices[0].text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_text) - - # calculate num tokens - if response.usage: - # transform 
usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - completion_tokens = self._num_tokens_from_string(model, assistant_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=response.model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: Stream[Completion], prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm completion stream response - - :param model: model name - :param credentials: model credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - full_text = "" - prompt_tokens = 0 - completion_tokens = 0 - - final_chunk = LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=""), - ), - ) - - for chunk in response: - if len(chunk.choices) == 0: - if chunk.usage: - # calculate num tokens - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.text is None or delta.text == ""): - continue - - # transform assistant message to prompt message - text = delta.text or "" - assistant_prompt_message = AssistantPromptMessage(content=text) - - full_text += text - - if delta.finish_reason is not None: - final_chunk = LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - ), - ) - else: - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - if not prompt_tokens: - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - - if not completion_tokens: - completion_tokens = self._num_tokens_from_string(model, full_text) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - final_chunk.delta.usage = usage - - yield final_chunk - - def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm chat model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # init model client - client = 
OpenAI(**credentials_kwargs) - - response_format = model_parameters.get("response_format") - if response_format: - if response_format == "json_schema": - json_schema = model_parameters.get("json_schema") - if not json_schema: - raise ValueError("Must define JSON Schema when the response format is json_schema") - try: - schema = json.loads(json_schema) - except: - raise ValueError(f"not correct json_schema format: {json_schema}") - model_parameters.pop("json_schema") - model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema} - else: - model_parameters["response_format"] = {"type": response_format} - - extra_model_kwargs = {} - - if tools: - # extra_model_kwargs['tools'] = [helper.dump_model(PromptMessageFunction(function=tool)) for tool in tools] - extra_model_kwargs["functions"] = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} for tool in tools - ] - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - if stream: - extra_model_kwargs["stream_options"] = {"include_usage": True} - - # clear illegal prompt messages - prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages) - - block_as_stream = False - if model.startswith("o1"): - if stream: - block_as_stream = True - stream = False - - if "stream_options" in extra_model_kwargs: - del extra_model_kwargs["stream_options"] - - if "stop" in extra_model_kwargs: - del extra_model_kwargs["stop"] - - # chat model - response = client.chat.completions.create( - messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) - - block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) - - if block_as_stream: - return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop) - - return block_result - - def _handle_chat_block_as_stream_response( - self, - block_result: LLMResult, - prompt_messages: list[PromptMessage], - stop: Optional[list[str]] = None, - ) -> Generator[LLMResultChunk, None, None]: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :param stop: stop words - :return: llm response chunk generator - """ - text = block_result.message.content - text = cast(str, text) - - if stop: - text = self.enforce_stop_tokens(text, stop) - - yield LLMResultChunk( - model=block_result.model, - prompt_messages=prompt_messages, - system_fingerprint=block_result.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=text), - finish_reason="stop", - usage=block_result.usage, - ), - ) - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: ChatCompletion, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response - """ - assistant_message = response.choices[0].message - # assistant_message_tool_calls = assistant_message.tool_calls - 
assistant_message_function_call = assistant_message.function_call - - # extract tool calls from response - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) - completion_tokens = self._num_tokens_from_messages(model, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=response.model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return response - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Stream[ChatCompletionChunk], - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response chunk generator - """ - full_assistant_content = "" - delta_assistant_message_function_call_storage: ChoiceDeltaFunctionCall = None - prompt_tokens = 0 - completion_tokens = 0 - final_tool_calls = [] - final_chunk = LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=""), - ), - ) - - for chunk in response: - if len(chunk.choices) == 0: - if chunk.usage: - # calculate num tokens - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - continue - - delta = chunk.choices[0] - has_finish_reason = delta.finish_reason is not None - - if ( - not has_finish_reason - and (delta.delta.content is None or delta.delta.content == "") - and delta.delta.function_call is None - ): - continue - - # assistant_message_tool_calls = delta.delta.tool_calls - assistant_message_function_call = delta.delta.function_call - - # extract tool calls from response - if delta_assistant_message_function_call_storage is not None: - # handle process of stream function call - if assistant_message_function_call: - # message has not ended ever - delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments - continue - else: - # message has ended - assistant_message_function_call = delta_assistant_message_function_call_storage - delta_assistant_message_function_call_storage = None - else: - if assistant_message_function_call: - # start of stream function call - delta_assistant_message_function_call_storage = assistant_message_function_call - if delta_assistant_message_function_call_storage.arguments is None: - delta_assistant_message_function_call_storage.arguments = "" - if not has_finish_reason: - continue - - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = 
self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - if tool_calls: - final_tool_calls.extend(tool_calls) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls) - - full_assistant_content += delta.delta.content or "" - - if has_finish_reason: - final_chunk = LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - ), - ) - else: - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - if not prompt_tokens: - prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) - - if not completion_tokens: - full_assistant_prompt_message = AssistantPromptMessage( - content=full_assistant_content, tool_calls=final_tool_calls - ) - completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - final_chunk.delta.usage = usage - - yield final_chunk - - def _extract_response_tool_calls( - self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - - return tool_calls - - def _extract_response_function_call( - self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall - ) -> AssistantPromptMessage.ToolCall: - """ - Extract function call from response - - :param response_function_call: response function call - :return: tool call - """ - tool_call = None - if response_function_call: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function_call.name, arguments=response_function_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_function_call.name, type="function", function=function - ) - - return tool_call - - def _clear_illegal_prompt_messages(self, model: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Clear illegal prompt messages for OpenAI API - - :param model: model name - :param prompt_messages: prompt messages - :return: cleaned prompt messages - """ - checklist = ["gpt-4-turbo", "gpt-4-turbo-2024-04-09"] - - if model in checklist: - # count how many user messages are there - user_message_count = len([m for m in prompt_messages if isinstance(m, UserPromptMessage)]) - if user_message_count > 1: - for prompt_message in prompt_messages: - if isinstance(prompt_message, UserPromptMessage): - if isinstance(prompt_message.content, list): - prompt_message.content = "\n".join( - [ - item.data - if item.type == 
PromptMessageContentType.TEXT - else "[IMAGE]" - if item.type == PromptMessageContentType.IMAGE - else "" - for item in prompt_message.content - ] - ) - - if model.startswith("o1"): - system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)]) - if system_message_count > 0: - new_prompt_messages = [] - for prompt_message in prompt_messages: - if isinstance(prompt_message, SystemPromptMessage): - prompt_message = UserPromptMessage( - content=prompt_message.content, - name=prompt_message.name, - ) - - new_prompt_messages.append(prompt_message) - prompt_messages = new_prompt_messages - - return prompt_messages - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for OpenAI API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - # message_dict["tool_calls"] = [tool_call.dict() for tool_call in - # message.tool_calls] - function_call = message.tool_calls[0] - message_dict["function_call"] = { - "name": function_call.function.name, - "arguments": function_call.function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - # message_dict = { - # "role": "tool", - # "content": message.content, - # "tool_call_id": message.tool_call_id - # } - message_dict = {"role": "function", "content": message.content, "name": message.tool_call_id} - else: - raise ValueError(f"Got unknown type {message}") - - if message.name: - message_dict["name"] = message.name - - return message_dict - - def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int: - """ - Calculate num tokens for text completion model with tiktoken package. - - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - try: - encoding = tiktoken.encoding_for_model(model) - except KeyError: - encoding = tiktoken.get_encoding("cl100k_base") - - num_tokens = len(encoding.encode(text)) - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - def _num_tokens_from_messages( - self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package. 
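
For reference, the per-message accounting in this method follows the OpenAI cookbook formula: a fixed overhead per message, one extra token when a name field is present, and three tokens to prime the assistant reply. A minimal standalone sketch of that formula, assuming only the tiktoken package (the function name and default model are illustrative):

import tiktoken

def count_chat_tokens(messages: list[dict], model: str = "gpt-3.5-turbo") -> int:
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")  # unknown model: fall back
    num_tokens = 0
    for message in messages:
        num_tokens += 3  # tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(str(value)))
            if key == "name":
                num_tokens += 1  # tokens_per_name
    return num_tokens + 3  # every reply is primed with the assistant role
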
- - Official documentation: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" - if model.startswith("ft:"): - model = model.split(":")[1] - - # Currently, we can use gpt4o to calculate chatgpt-4o-latest's token. - if model == "chatgpt-4o-latest" or model.startswith("o1"): - model = "gpt-4o" - - try: - encoding = tiktoken.encoding_for_model(model) - except KeyError: - logger.warning("Warning: model not found. Using cl100k_base encoding.") - model = "cl100k_base" - encoding = tiktoken.get_encoding(model) - - if model.startswith("gpt-3.5-turbo-0301"): - # every message follows {role/name}\n{content}\n - tokens_per_message = 4 - # if there's a name, the role is omitted - tokens_per_name = -1 - elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith("o1"): - tokens_per_message = 3 - tokens_per_name = 1 - else: - raise NotImplementedError( - f"get_num_tokens_from_messages() is not presently implemented " - f"for model {model}." - "See https://platform.openai.com/docs/advanced-usage/managing-tokens for " - "information on how messages are converted to tokens." - ) - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += len(encoding.encode(t_key)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(encoding.encode(f_key)) - num_tokens += len(encoding.encode(f_value)) - else: - num_tokens += len(encoding.encode(t_key)) - num_tokens += len(encoding.encode(t_value)) - else: - num_tokens += len(encoding.encode(str(value))) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - def _num_tokens_for_tools(self, encoding: tiktoken.Encoding, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling with tiktoken package. 
- - :param encoding: encoding - :param tools: tools for tool calling - :return: number of tokens - """ - num_tokens = 0 - for tool in tools: - num_tokens += len(encoding.encode("type")) - num_tokens += len(encoding.encode("function")) - - # calculate num tokens for function object - num_tokens += len(encoding.encode("name")) - num_tokens += len(encoding.encode(tool.name)) - num_tokens += len(encoding.encode("description")) - num_tokens += len(encoding.encode(tool.description)) - parameters = tool.parameters - num_tokens += len(encoding.encode("parameters")) - if "title" in parameters: - num_tokens += len(encoding.encode("title")) - num_tokens += len(encoding.encode(parameters.get("title"))) - num_tokens += len(encoding.encode("type")) - num_tokens += len(encoding.encode(parameters.get("type"))) - if "properties" in parameters: - num_tokens += len(encoding.encode("properties")) - for key, value in parameters.get("properties").items(): - num_tokens += len(encoding.encode(key)) - for field_key, field_value in value.items(): - num_tokens += len(encoding.encode(field_key)) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += len(encoding.encode(enum_field)) - else: - num_tokens += len(encoding.encode(field_key)) - num_tokens += len(encoding.encode(str(field_value))) - if "required" in parameters: - num_tokens += len(encoding.encode("required")) - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += len(encoding.encode(required_field)) - - return num_tokens - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - OpenAI supports fine-tuning of their models. This method returns the schema of the base model - but renamed to the fine-tuned model name. 
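
For context on the ft: handling just below: OpenAI fine-tuned model ids take the form ft:&lt;base&gt;:&lt;org&gt;::&lt;id&gt;, so splitting on colons puts the base model at index 1. A small illustrative helper (the name and example id are hypothetical):

def resolve_base_model(model: str) -> str:
    # "ft:<base>:<org>::<id>" -> "<base>"; anything else is already a base model
    return model.split(":")[1] if model.startswith("ft:") else model

assert resolve_base_model("ft:gpt-3.5-turbo-0125:acme::abc123") == "gpt-3.5-turbo-0125"
assert resolve_base_model("gpt-4o") == "gpt-4o"
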
- - :param model: model name - :param credentials: credentials - - :return: model schema - """ - if not model.startswith("ft:"): - base_model = model - else: - # get base_model - base_model = model.split(":")[1] - - # get model schema - models = self.predefined_models() - model_map = {model.model: model for model in models} - if base_model not in model_map: - raise ValueError(f"Base model {base_model} not found") - - base_model_schema = model_map[base_model] - - base_model_schema_features = base_model_schema.features or [] - base_model_schema_model_properties = base_model_schema.model_properties or {} - base_model_schema_parameters_rules = base_model_schema.parameter_rules or [] - - entity = AIModelEntity( - model=model, - label=I18nObject(zh_Hans=model, en_US=model), - model_type=ModelType.LLM, - features=list(base_model_schema_features), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties=dict(base_model_schema_model_properties.items()), - parameter_rules=list(base_model_schema_parameters_rules), - pricing=base_model_schema.pricing, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml deleted file mode 100644 index 0ade7f8ded..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/o1-mini-2024-09-12.yaml +++ /dev/null @@ -1,33 +0,0 @@ -model: o1-mini-2024-09-12 -label: - zh_Hans: o1-mini-2024-09-12 - en_US: o1-mini-2024-09-12 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - default: 65536 - min: 1 - max: 65536 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '3.00' - output: '12.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml deleted file mode 100644 index 60816c5d1e..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/o1-mini.yaml +++ /dev/null @@ -1,33 +0,0 @@ -model: o1-mini -label: - zh_Hans: o1-mini - en_US: o1-mini -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - default: 65536 - min: 1 - max: 65536 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '3.00' - output: '12.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml deleted file mode 100644 index c9da96f611..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/o1-preview-2024-09-12.yaml +++ /dev/null @@ -1,33 +0,0 @@ -model: o1-preview-2024-09-12 -label: - zh_Hans: o1-preview-2024-09-12 - en_US: o1-preview-2024-09-12 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - default: 32768 - min: 1 - max: 32768 - - name: 
response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '15.00' - output: '60.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml deleted file mode 100644 index c83874b765..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/o1-preview.yaml +++ /dev/null @@ -1,33 +0,0 @@ -model: o1-preview -label: - zh_Hans: o1-preview - en_US: o1-preview -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - default: 32768 - min: 1 - max: 32768 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '15.00' - output: '60.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/llm/text-davinci-003.yaml b/api/core/model_runtime/model_providers/openai/llm/text-davinci-003.yaml deleted file mode 100644 index 76b5d84875..0000000000 --- a/api/core/model_runtime/model_providers/openai/llm/text-davinci-003.yaml +++ /dev/null @@ -1,29 +0,0 @@ -model: text-davinci-003 -label: - zh_Hans: text-davinci-003 - en_US: text-davinci-003 -model_type: llm -features: [ ] -model_properties: - mode: completion - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 -pricing: - input: '0.001' - output: '0.002' - unit: '0.001' - currency: USD -deprecated: true diff --git a/api/core/model_runtime/model_providers/openai/moderation/moderation.py b/api/core/model_runtime/model_providers/openai/moderation/moderation.py index 619044d808..a83248c0c2 100644 --- a/api/core/model_runtime/model_providers/openai/moderation/moderation.py +++ b/api/core/model_runtime/model_providers/openai/moderation/moderation.py @@ -1,15 +1,25 @@ +from collections.abc import Mapping from typing import Optional +import openai +from httpx import Timeout from openai import OpenAI from openai.types import ModerationCreateResponse from core.model_runtime.entities.model_entities import ModelPropertyKey +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) from core.model_runtime.errors.validate import CredentialsValidateFailedError from core.model_runtime.model_providers.__base.moderation_model import ModerationModel -from core.model_runtime.model_providers.openai._common import _CommonOpenAI -class OpenAIModerationModel(_CommonOpenAI, ModerationModel): +class OpenAIModerationModel(ModerationModel): """ Model class for OpenAI text moderation model. 
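
For orientation, the moderation model class being reworked here wraps the OpenAI moderations endpoint. A minimal sketch of the underlying SDK call (the API key is a placeholder):

from openai import OpenAI

client = OpenAI(api_key="sk-...")  # placeholder credential
result = client.moderations.create(input="text to screen")
print(result.results[0].flagged)   # True if any moderation category fired
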
""" @@ -111,3 +121,48 @@ class OpenAIModerationModel(_CommonOpenAI, ModerationModel): return model_schema.model_properties[ModelPropertyKey.MAX_CHUNKS] return 1 + + def _to_credential_kwargs(self, credentials: Mapping) -> dict: + """ + Transform credentials to kwargs for model instance + + :param credentials: + :return: + """ + credentials_kwargs = { + "api_key": credentials["openai_api_key"], + "timeout": Timeout(315.0, read=300.0, write=10.0, connect=5.0), + "max_retries": 1, + } + + if credentials.get("openai_api_base"): + openai_api_base = credentials["openai_api_base"].rstrip("/") + credentials_kwargs["base_url"] = openai_api_base + "/v1" + + if "openai_organization" in credentials: + credentials_kwargs["organization"] = credentials["openai_organization"] + + return credentials_kwargs + + @property + def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: + """ + Map model invoke error to unified error + The key is the error type thrown to the caller + The value is the error type thrown by the model, + which needs to be converted into a unified error type for the caller. + + :return: Invoke error mapping + """ + return { + InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError], + InvokeServerUnavailableError: [openai.InternalServerError], + InvokeRateLimitError: [openai.RateLimitError], + InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError], + InvokeBadRequestError: [ + openai.BadRequestError, + openai.NotFoundError, + openai.UnprocessableEntityError, + openai.APIError, + ], + } diff --git a/api/core/model_runtime/model_providers/openai/moderation/text-moderation-stable.yaml b/api/core/model_runtime/model_providers/openai/moderation/text-moderation-stable.yaml deleted file mode 100644 index 5ca1809167..0000000000 --- a/api/core/model_runtime/model_providers/openai/moderation/text-moderation-stable.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: text-moderation-stable -model_type: moderation -model_properties: - max_chunks: 32 - max_characters_per_chunk: 2000 diff --git a/api/core/model_runtime/model_providers/openai/openai.py b/api/core/model_runtime/model_providers/openai/openai.py deleted file mode 100644 index 175d7db73c..0000000000 --- a/api/core/model_runtime/model_providers/openai/openai.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging -from collections.abc import Mapping - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class OpenAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: Mapping) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `gpt-3.5-turbo` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="gpt-3.5-turbo", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/openai/openai.yaml b/api/core/model_runtime/model_providers/openai/openai.yaml deleted file mode 100644 index b4dc8fd4f2..0000000000 --- a/api/core/model_runtime/model_providers/openai/openai.yaml +++ /dev/null @@ -1,89 +0,0 @@ -provider: openai -label: - en_US: OpenAI -description: - en_US: Models provided by OpenAI, such as GPT-3.5-Turbo and GPT-4. - zh_Hans: OpenAI 提供的模型,例如 GPT-3.5-Turbo 和 GPT-4。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#E5E7EB" -help: - title: - en_US: Get your API Key from OpenAI - zh_Hans: 从 OpenAI 获取 API Key - url: - en_US: https://platform.openai.com/account/api-keys -supported_model_types: - - llm - - text-embedding - - speech2text - - moderation - - tts -configurate_methods: - - predefined-model - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: openai_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: openai_organization - label: - zh_Hans: 组织 ID - en_US: Organization - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的组织 ID - en_US: Enter your Organization ID - - variable: openai_api_base - label: - zh_Hans: API Base - en_US: API Base - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的 API Base - en_US: Enter your API Base -provider_credential_schema: - credential_form_schemas: - - variable: openai_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: openai_organization - label: - zh_Hans: 组织 ID - en_US: Organization - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的组织 ID - en_US: Enter your Organization ID - - variable: openai_api_base - label: - zh_Hans: API Base - en_US: API Base - type: text-input - required: false - placeholder: - zh_Hans: 在此输入您的 API Base, 如:https://api.openai.com - en_US: Enter your API Base, e.g. 
https://api.openai.com diff --git a/api/core/model_runtime/model_providers/openai/speech2text/__init__.py b/api/core/model_runtime/model_providers/openai/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/speech2text/speech2text.py b/api/core/model_runtime/model_providers/openai/speech2text/speech2text.py deleted file mode 100644 index 18f97e45f3..0000000000 --- a/api/core/model_runtime/model_providers/openai/speech2text/speech2text.py +++ /dev/null @@ -1,60 +0,0 @@ -from typing import IO, Optional - -from openai import OpenAI - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel -from core.model_runtime.model_providers.openai._common import _CommonOpenAI - - -class OpenAISpeech2TextModel(_CommonOpenAI, Speech2TextModel): - """ - Model class for OpenAI Speech to text model. - """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - return self._speech2text_invoke(model, credentials, file) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - audio_file_path = self._get_demo_file_path() - - with open(audio_file_path, "rb") as audio_file: - self._speech2text_invoke(model, credentials, audio_file) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _speech2text_invoke(self, model: str, credentials: dict, file: IO[bytes]) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :return: text for given audio file - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # init model client - client = OpenAI(**credentials_kwargs) - - response = client.audio.transcriptions.create(model=model, file=file) - - return response.text diff --git a/api/core/model_runtime/model_providers/openai/speech2text/whisper-1.yaml b/api/core/model_runtime/model_providers/openai/speech2text/whisper-1.yaml deleted file mode 100644 index 6c14c76619..0000000000 --- a/api/core/model_runtime/model_providers/openai/speech2text/whisper-1.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: whisper-1 -model_type: speech2text -model_properties: - file_upload_limit: 25 - supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/__init__.py b/api/core/model_runtime/model_providers/openai/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-large.yaml b/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-large.yaml deleted file mode 100644 index 9489170fde..0000000000 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-large.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: text-embedding-3-large -model_type: text-embedding -model_properties: - context_size: 8191 - max_chunks: 32 -pricing: - input: '0.00013' - 
unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-small.yaml b/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-small.yaml deleted file mode 100644 index 586ba2b28f..0000000000 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-3-small.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: text-embedding-3-small -model_type: text-embedding -model_properties: - context_size: 8191 - max_chunks: 32 -pricing: - input: '0.00002' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-ada-002.yaml b/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-ada-002.yaml deleted file mode 100644 index ef1c49b017..0000000000 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text-embedding-ada-002.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: text-embedding-ada-002 -model_type: text-embedding -model_properties: - context_size: 8097 - max_chunks: 32 -pricing: - input: '0.0001' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/tts/__init__.py b/api/core/model_runtime/model_providers/openai/tts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai/tts/tts-1-hd.yaml b/api/core/model_runtime/model_providers/openai/tts/tts-1-hd.yaml deleted file mode 100644 index 449c131f9d..0000000000 --- a/api/core/model_runtime/model_providers/openai/tts/tts-1-hd.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: tts-1-hd -model_type: tts -model_properties: - default_voice: 'alloy' - voices: - - mode: 'alloy' - name: 'Alloy' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - - mode: 'echo' - name: 'Echo' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - - mode: 'fable' - name: 'Fable' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - - mode: 'onyx' - name: 'Onyx' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - - mode: 'nova' - name: 'Nova' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - - mode: 'shimmer' - name: 'Shimmer' - language: [ 'zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID' ] - word_limit: 3500 - audio_type: 'mp3' - max_workers: 5 -pricing: - input: '0.03' - output: '0' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/tts/tts-1.yaml b/api/core/model_runtime/model_providers/openai/tts/tts-1.yaml deleted file mode 100644 index 83969fb2f7..0000000000 --- a/api/core/model_runtime/model_providers/openai/tts/tts-1.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: tts-1 -model_type: tts -model_properties: - default_voice: 'alloy' - voices: - - mode: 'alloy' - name: 'Alloy' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - - mode: 'echo' - name: 'Echo' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - - mode: 'fable' - name: 'Fable' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - - mode: 'onyx' - name: 'Onyx' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - - mode: 'nova' - name: 'Nova' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - - mode: 
'shimmer' - name: 'Shimmer' - language: ['zh-Hans', 'en-US', 'de-DE', 'fr-FR', 'es-ES', 'it-IT', 'th-TH', 'id-ID'] - word_limit: 3500 - audio_type: 'mp3' - max_workers: 5 -pricing: - input: '0.015' - output: '0' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openai/tts/tts.py b/api/core/model_runtime/model_providers/openai/tts/tts.py deleted file mode 100644 index a14c91639b..0000000000 --- a/api/core/model_runtime/model_providers/openai/tts/tts.py +++ /dev/null @@ -1,118 +0,0 @@ -import concurrent.futures -from typing import Optional - -from openai import OpenAI - -from core.model_runtime.errors.invoke import InvokeBadRequestError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.tts_model import TTSModel -from core.model_runtime.model_providers.openai._common import _CommonOpenAI - - -class OpenAIText2SpeechModel(_CommonOpenAI, TTSModel): - """ - Model class for OpenAI Speech to text model. - """ - - def _invoke( - self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None - ) -> any: - """ - _invoke text2speech model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :param user: unique user id - :return: text translated to audio file - """ - - if not voice or voice not in [ - d["value"] for d in self.get_tts_model_voices(model=model, credentials=credentials) - ]: - voice = self._get_model_default_voice(model, credentials) - # if streaming: - return self._tts_invoke_streaming(model=model, credentials=credentials, content_text=content_text, voice=voice) - - def validate_credentials(self, model: str, credentials: dict, user: Optional[str] = None) -> None: - """ - validate credentials text2speech model - - :param model: model name - :param credentials: model credentials - :param user: unique user id - :return: text translated to audio file - """ - try: - self._tts_invoke_streaming( - model=model, - credentials=credentials, - content_text="Hello Dify!", - voice=self._get_model_default_voice(model, credentials), - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any: - """ - _tts_invoke_streaming text2speech model - - :param model: model name - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :return: text translated to audio file - """ - try: - # doc: https://platform.openai.com/docs/guides/text-to-speech - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - model_support_voice = [ - x.get("value") for x in self.get_tts_model_voices(model=model, credentials=credentials) - ] - if not voice or voice not in model_support_voice: - voice = self._get_model_default_voice(model, credentials) - word_limit = self._get_model_word_limit(model, credentials) - if len(content_text) > word_limit: - sentences = self._split_text_into_sentences(content_text, max_length=word_limit) - executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) - futures = [ - executor.submit( - client.audio.speech.with_streaming_response.create, - model=model, - response_format="mp3", - input=sentences[i], - voice=voice, - ) - for i in range(len(sentences)) - 
] - for future in futures: - yield from future.result().__enter__().iter_bytes(1024) # noqa:PLC2801 - - else: - response = client.audio.speech.with_streaming_response.create( - model=model, voice=voice, response_format="mp3", input=content_text.strip() - ) - - yield from response.__enter__().iter_bytes(1024) # noqa:PLC2801 - except Exception as ex: - raise InvokeBadRequestError(str(ex)) - - def _process_sentence(self, sentence: str, model: str, voice, credentials: dict): - """ - _tts_invoke openai text2speech model api - - :param model: model name - :param credentials: model credentials - :param voice: model timbre - :param sentence: text content to be translated - :return: text translated to audio file - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - response = client.audio.speech.create(model=model, voice=voice, input=sentence.strip()) - if isinstance(response.read(), bytes): - return response.read() diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/_common.py b/api/core/model_runtime/model_providers/openai_api_compatible/_common.py deleted file mode 100644 index 1234e44f80..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/_common.py +++ /dev/null @@ -1,43 +0,0 @@ -import requests - -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonOaiApiCompat: - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
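
The _invoke_error_mapping pattern deleted below is typically consumed generically: the runtime walks the dict in insertion order and re-raises the first unified type whose source exceptions match, which is why requests.exceptions.HTTPError can legitimately appear under more than one key. A minimal sketch of that dispatch (the helper name is illustrative):

def to_unified_error(exc: Exception, mapping: dict[type, list[type]]) -> Exception:
    for unified_type, source_types in mapping.items():
        if isinstance(exc, tuple(source_types)):
            return unified_type(str(exc))  # wrap in the caller-facing type
    return exc  # unmapped errors pass through unchanged
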
- - :return: Invoke error mapping - """ - return { - InvokeAuthorizationError: [ - requests.exceptions.InvalidHeader, # Missing or Invalid API Key - ], - InvokeBadRequestError: [ - requests.exceptions.HTTPError, # Invalid Endpoint URL or model name - requests.exceptions.InvalidURL, # Misconfigured request or other API error - ], - InvokeRateLimitError: [ - requests.exceptions.RetryError # Too many requests sent in a short period of time - ], - InvokeServerUnavailableError: [ - requests.exceptions.ConnectionError, # Engine Overloaded - requests.exceptions.HTTPError, # Server Error - ], - InvokeConnectionError: [ - requests.exceptions.ConnectTimeout, # Timeout - requests.exceptions.ReadTimeout, # Timeout - ], - } diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py deleted file mode 100644 index c2ffe653c8..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ /dev/null @@ -1,828 +0,0 @@ -import json -import logging -from collections.abc import Generator -from decimal import Decimal -from typing import Optional, Union, cast -from urllib.parse import urljoin - -import requests - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContent, - PromptMessageContentType, - PromptMessageFunction, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, - PriceConfig, -) -from core.model_runtime.errors.invoke import InvokeError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat -from core.model_runtime.utils import helper - -logger = logging.getLogger(__name__) - - -class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel): - """ - Model class for OpenAI large language model. 
- """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - # text completion model - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: - :param credentials: - :param prompt_messages: - :param tools: tools for tool calling - :return: - """ - return self._num_tokens_from_messages(model, prompt_messages, tools, credentials) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials using requests to ensure compatibility with all providers following - OpenAI's API standard. - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - headers = {"Content-Type": "application/json"} - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials["endpoint_url"] - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - # prepare the payload for a simple ping to the model - data = {"model": model, "max_tokens": 5} - - completion_type = LLMMode.value_of(credentials["mode"]) - - if completion_type is LLMMode.CHAT: - data["messages"] = [ - {"role": "user", "content": "ping"}, - ] - endpoint_url = urljoin(endpoint_url, "chat/completions") - elif completion_type is LLMMode.COMPLETION: - data["prompt"] = "ping" - endpoint_url = urljoin(endpoint_url, "completions") - else: - raise ValueError("Unsupported completion type for model configuration.") - - # send a post request to validate the credentials - response = requests.post(endpoint_url, headers=headers, json=data, timeout=(10, 300)) - - if response.status_code != 200: - raise CredentialsValidateFailedError( - f"Credentials validation failed with status code {response.status_code}" - ) - - try: - json_result = response.json() - except json.JSONDecodeError as e: - raise CredentialsValidateFailedError("Credentials validation failed: JSON decode error") - - if completion_type is LLMMode.CHAT and json_result.get("object", "") == "": - json_result["object"] = "chat.completion" - elif completion_type is LLMMode.COMPLETION and json_result.get("object", "") == "": - json_result["object"] = "text_completion" - - if completion_type is LLMMode.CHAT and ( - "object" not in json_result or json_result["object"] != "chat.completion" - ): - raise CredentialsValidateFailedError( - "Credentials validation failed: invalid response object, must be 'chat.completion'" - ) - elif completion_type is LLMMode.COMPLETION and ( - "object" not in json_result or json_result["object"] != "text_completion" 
- ): - raise CredentialsValidateFailedError( - "Credentials validation failed: invalid response object, must be 'text_completion'" - ) - except CredentialsValidateFailedError: - raise - except Exception as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials validation: {str(ex)}") - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - features = [] - - function_calling_type = credentials.get("function_calling_type", "no_call") - if function_calling_type == "function_call": - features.append(ModelFeature.TOOL_CALL) - elif function_calling_type == "tool_call": - features.append(ModelFeature.MULTI_TOOL_CALL) - - stream_function_calling = credentials.get("stream_function_calling", "supported") - if stream_function_calling == "supported": - features.append(ModelFeature.STREAM_TOOL_CALL) - - vision_support = credentials.get("vision_support", "not_support") - if vision_support == "support": - features.append(ModelFeature.VISION) - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.LLM, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - features=features, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "4096")), - ModelPropertyKey.MODE: credentials.get("mode"), - }, - parameter_rules=[ - ParameterRule( - name=DefaultParameterName.TEMPERATURE.value, - label=I18nObject(en_US="Temperature", zh_Hans="温度"), - help=I18nObject( - en_US="Kernel sampling threshold. Used to determine the randomness of the results." - "The higher the value, the stronger the randomness." - "The higher the possibility of getting different answers to the same question.", - zh_Hans="核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。", - ), - type=ParameterType.FLOAT, - default=float(credentials.get("temperature", 0.7)), - min=0, - max=2, - precision=2, - ), - ParameterRule( - name=DefaultParameterName.TOP_P.value, - label=I18nObject(en_US="Top P", zh_Hans="Top P"), - help=I18nObject( - en_US="The probability threshold of the nucleus sampling method during the generation process." - "The larger the value is, the higher the randomness of generation will be." - "The smaller the value is, the higher the certainty of generation will be.", - zh_Hans="生成过程中核采样方法概率阈值。取值越大,生成的随机性越高;取值越小,生成的确定性越高。", - ), - type=ParameterType.FLOAT, - default=float(credentials.get("top_p", 1)), - min=0, - max=1, - precision=2, - ), - ParameterRule( - name=DefaultParameterName.FREQUENCY_PENALTY.value, - label=I18nObject(en_US="Frequency Penalty", zh_Hans="频率惩罚"), - help=I18nObject( - en_US="For controlling the repetition rate of words used by the model." - "Increasing this can reduce the repetition of the same words in the model's output.", - zh_Hans="用于控制模型已使用字词的重复率。 提高此项可以降低模型在输出中重复相同字词的重复度。", - ), - type=ParameterType.FLOAT, - default=float(credentials.get("frequency_penalty", 0)), - min=-2, - max=2, - ), - ParameterRule( - name=DefaultParameterName.PRESENCE_PENALTY.value, - label=I18nObject(en_US="Presence Penalty", zh_Hans="存在惩罚"), - help=I18nObject( - en_US="Used to control the repetition rate when generating models." 
- "Increasing this can reduce the repetition rate of model generation.", - zh_Hans="用于控制模型生成时的重复度。提高此项可以降低模型生成的重复度。", - ), - type=ParameterType.FLOAT, - default=float(credentials.get("presence_penalty", 0)), - min=-2, - max=2, - ), - ParameterRule( - name=DefaultParameterName.MAX_TOKENS.value, - label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"), - help=I18nObject( - en_US="Maximum length of tokens for the model response.", zh_Hans="模型回答的tokens的最大长度。" - ), - type=ParameterType.INT, - default=512, - min=1, - max=int(credentials.get("max_tokens_to_sample", 4096)), - ), - ], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - output=Decimal(credentials.get("output_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - ) - - if credentials["mode"] == "chat": - entity.model_properties[ModelPropertyKey.MODE] = LLMMode.CHAT.value - elif credentials["mode"] == "completion": - entity.model_properties[ModelPropertyKey.MODE] = LLMMode.COMPLETION.value - else: - raise ValueError(f"Unknown completion type {credentials['completion_type']}") - - return entity - - # validate_credentials method has been rewritten to use the requests library for compatibility with all providers - # following OpenAI's API standard. - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke llm completion model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - headers = { - "Content-Type": "application/json", - "Accept-Charset": "utf-8", - } - extra_headers = credentials.get("extra_headers") - if extra_headers is not None: - headers = { - **headers, - **extra_headers, - } - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials["endpoint_url"] - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - data = {"model": model, "stream": stream, **model_parameters} - - completion_type = LLMMode.value_of(credentials["mode"]) - - if completion_type is LLMMode.CHAT: - endpoint_url = urljoin(endpoint_url, "chat/completions") - data["messages"] = [self._convert_prompt_message_to_dict(m, credentials) for m in prompt_messages] - elif completion_type is LLMMode.COMPLETION: - endpoint_url = urljoin(endpoint_url, "completions") - data["prompt"] = prompt_messages[0].content - else: - raise ValueError("Unsupported completion type for model configuration.") - - # annotate tools with names, descriptions, etc. 
- function_calling_type = credentials.get("function_calling_type", "no_call") - formatted_tools = [] - if tools: - if function_calling_type == "function_call": - data["functions"] = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} - for tool in tools - ] - elif function_calling_type == "tool_call": - data["tool_choice"] = "auto" - - for tool in tools: - formatted_tools.append(helper.dump_model(PromptMessageFunction(function=tool))) - - data["tools"] = formatted_tools - - if stop: - data["stop"] = stop - - if user: - data["user"] = user - - response = requests.post(endpoint_url, headers=headers, json=data, timeout=(10, 300), stream=stream) - - if response.encoding is None or response.encoding == "ISO-8859-1": - response.encoding = "utf-8" - - if response.status_code != 200: - raise InvokeError(f"API request failed with status code {response.status_code}: {response.text}") - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: model credentials - :param response: streamed response - :param prompt_messages: prompt messages - :return: llm response chunk generator - """ - full_assistant_content = "" - chunk_index = 0 - - def create_final_llm_result_chunk( - index: int, message: AssistantPromptMessage, finish_reason: str - ) -> LLMResultChunk: - # calculate num tokens - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - completion_tokens = self._num_tokens_from_string(model, full_assistant_content) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - return LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage), - ) - - # delimiter for stream response, need unicode_escape - import codecs - - delimiter = credentials.get("stream_mode_delimiter", "\n\n") - delimiter = codecs.decode(delimiter, "unicode_escape") - - tools_calls: list[AssistantPromptMessage.ToolCall] = [] - - def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]): - def get_tool_call(tool_call_id: str): - if not tool_call_id: - return tools_calls[-1] - - tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None) - if tool_call is None: - tool_call = AssistantPromptMessage.ToolCall( - id=tool_call_id, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""), - ) - tools_calls.append(tool_call) - - return tool_call - - for new_tool_call in new_tool_calls: - # get tool call - tool_call = get_tool_call(new_tool_call.function.name) - # update tool call - if new_tool_call.id: - tool_call.id = new_tool_call.id - if new_tool_call.type: - tool_call.type = new_tool_call.type - if new_tool_call.function.name: - tool_call.function.name = new_tool_call.function.name - if new_tool_call.function.arguments: - tool_call.function.arguments += new_tool_call.function.arguments - - finish_reason = None # The default value of finish_reason is None - - for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter): 
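
The loop below hand-parses server-sent events: comment lines start with ":", payload lines carry a "data:" prefix, and "data: [DONE]" marks end of stream. Note that lstrip("data: ") strips a character set rather than the literal prefix, which only works here because JSON payloads begin with "{"; a prefix-safe sketch:

import json

def parse_sse_line(line: str):
    line = line.strip()
    if not line or line.startswith(":"):
        return None                               # blank line or SSE comment
    payload = line.removeprefix("data:").strip()  # strip the literal prefix
    if payload == "[DONE]":
        return None                               # end-of-stream sentinel
    return json.loads(payload)
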
- chunk = chunk.strip() - if chunk: - # ignore sse comments - if chunk.startswith(":"): - continue - decoded_chunk = chunk.strip().lstrip("data: ").lstrip() - if decoded_chunk == "[DONE]": # Some provider returns "data: [DONE]" - continue - - try: - chunk_json = json.loads(decoded_chunk) - # stream ended - except json.JSONDecodeError as e: - yield create_final_llm_result_chunk( - index=chunk_index + 1, - message=AssistantPromptMessage(content=""), - finish_reason="Non-JSON encountered.", - ) - break - if not chunk_json or len(chunk_json["choices"]) == 0: - continue - - choice = chunk_json["choices"][0] - finish_reason = chunk_json["choices"][0].get("finish_reason") - chunk_index += 1 - - if "delta" in choice: - delta = choice["delta"] - delta_content = delta.get("content") - - assistant_message_tool_calls = None - - if "tool_calls" in delta and credentials.get("function_calling_type", "no_call") == "tool_call": - assistant_message_tool_calls = delta.get("tool_calls", None) - elif ( - "function_call" in delta - and credentials.get("function_calling_type", "no_call") == "function_call" - ): - assistant_message_tool_calls = [ - {"id": "tool_call_id", "type": "function", "function": delta.get("function_call", {})} - ] - - # assistant_message_function_call = delta.delta.function_call - - # extract tool calls from response - if assistant_message_tool_calls: - tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - increase_tool_call(tool_calls) - - if delta_content is None or delta_content == "": - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta_content, - ) - - # reset tool calls - tool_calls = [] - full_assistant_content += delta_content - elif "text" in choice: - choice_text = choice.get("text", "") - if choice_text == "": - continue - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=choice_text) - full_assistant_content += choice_text - else: - continue - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=assistant_prompt_message, - ), - ) - - chunk_index += 1 - - if tools_calls: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=chunk_index, - message=AssistantPromptMessage(tool_calls=tools_calls, content=""), - ), - ) - - yield create_final_llm_result_chunk( - index=chunk_index, message=AssistantPromptMessage(content=""), finish_reason=finish_reason - ) - - def _handle_generate_response( - self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage] - ) -> LLMResult: - response_json = response.json() - - completion_type = LLMMode.value_of(credentials["mode"]) - - output = response_json["choices"][0] - - response_content = "" - tool_calls = None - function_calling_type = credentials.get("function_calling_type", "no_call") - if completion_type is LLMMode.CHAT: - response_content = output.get("message", {})["content"] - if function_calling_type == "tool_call": - tool_calls = output.get("message", {}).get("tool_calls") - elif function_calling_type == "function_call": - tool_calls = output.get("message", {}).get("function_call") - - elif completion_type is LLMMode.COMPLETION: - response_content = output["text"] - - assistant_message = AssistantPromptMessage(content=response_content, tool_calls=[]) - - if tool_calls: - if function_calling_type == 
"tool_call": - assistant_message.tool_calls = self._extract_response_tool_calls(tool_calls) - elif function_calling_type == "function_call": - assistant_message.tool_calls = [self._extract_response_function_call(tool_calls)] - - usage = response_json.get("usage") - if usage: - # transform usage - prompt_tokens = usage["prompt_tokens"] - completion_tokens = usage["completion_tokens"] - else: - # calculate num tokens - prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content) - completion_tokens = self._num_tokens_from_string(model, assistant_message.content) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=response_json["model"], - prompt_messages=prompt_messages, - message=assistant_message, - usage=usage, - ) - - return result - - def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict: - """ - Convert PromptMessage to dict for OpenAI API format - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(PromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - function_calling_type = credentials.get("function_calling_type", "no_call") - if function_calling_type == "tool_call": - message_dict["tool_calls"] = [tool_call.dict() for tool_call in message.tool_calls] - elif function_calling_type == "function_call": - function_call = message.tool_calls[0] - message_dict["function_call"] = { - "name": function_call.function.name, - "arguments": function_call.function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - function_calling_type = credentials.get("function_calling_type", "no_call") - if function_calling_type == "tool_call": - message_dict = {"role": "tool", "content": message.content, "tool_call_id": message.tool_call_id} - elif function_calling_type == "function_call": - message_dict = {"role": "function", "content": message.content, "name": message.tool_call_id} - else: - raise ValueError(f"Got unknown type {message}") - - if message.name and message_dict.get("role", "") != "tool": - message_dict["name"] = message.name - - return message_dict - - def _num_tokens_from_string( - self, model: str, text: Union[str, list[PromptMessageContent]], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """ - Approximate num tokens for model with gpt2 
tokenizer. - - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - if isinstance(text, str): - full_text = text - else: - full_text = "" - for message_content in text: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(PromptMessageContent, message_content) - full_text += message_content.data - - num_tokens = self._get_num_tokens_by_gpt2(full_text) - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_from_messages( - self, - model: str, - messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - credentials: dict = None, - ) -> int: - """ - Approximate num tokens with GPT2 tokenizer. - """ - - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m, credentials) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += self._get_num_tokens_by_gpt2(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += self._get_num_tokens_by_gpt2(f_key) - num_tokens += self._get_num_tokens_by_gpt2(f_value) - else: - num_tokens += self._get_num_tokens_by_gpt2(t_key) - num_tokens += self._get_num_tokens_by_gpt2(t_value) - else: - num_tokens += self._get_num_tokens_by_gpt2(str(value)) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling with tiktoken package. 
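
Despite the docstring, this variant counts tokens with the GPT-2 helper rather than tiktoken. Assuming _get_num_tokens_by_gpt2 is backed by a Hugging Face GPT-2 tokenizer (an assumption; the helper is defined elsewhere in the code base), an equivalent standalone sketch:

from transformers import GPT2TokenizerFast

_tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def num_tokens_gpt2(text: str) -> int:
    return len(_tokenizer.encode(text))
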
- - :param tools: tools for tool calling - :return: number of tokens - """ - num_tokens = 0 - for tool in tools: - num_tokens += self._get_num_tokens_by_gpt2("type") - num_tokens += self._get_num_tokens_by_gpt2("function") - num_tokens += self._get_num_tokens_by_gpt2("function") - - # calculate num tokens for function object - num_tokens += self._get_num_tokens_by_gpt2("name") - num_tokens += self._get_num_tokens_by_gpt2(tool.name) - num_tokens += self._get_num_tokens_by_gpt2("description") - num_tokens += self._get_num_tokens_by_gpt2(tool.description) - parameters = tool.parameters - num_tokens += self._get_num_tokens_by_gpt2("parameters") - if "title" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("title") - num_tokens += self._get_num_tokens_by_gpt2(parameters.get("title")) - num_tokens += self._get_num_tokens_by_gpt2("type") - num_tokens += self._get_num_tokens_by_gpt2(parameters.get("type")) - if "properties" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("properties") - for key, value in parameters.get("properties").items(): - num_tokens += self._get_num_tokens_by_gpt2(key) - for field_key, field_value in value.items(): - num_tokens += self._get_num_tokens_by_gpt2(field_key) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += self._get_num_tokens_by_gpt2(enum_field) - else: - num_tokens += self._get_num_tokens_by_gpt2(field_key) - num_tokens += self._get_num_tokens_by_gpt2(str(field_value)) - if "required" in parameters: - num_tokens += self._get_num_tokens_by_gpt2("required") - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += self._get_num_tokens_by_gpt2(required_field) - - return num_tokens - - def _extract_response_tool_calls(self, response_tool_calls: list[dict]) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.get("function", {}).get("name", ""), - arguments=response_tool_call.get("function", {}).get("arguments", ""), - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.get("id", ""), type=response_tool_call.get("type", ""), function=function - ) - tool_calls.append(tool_call) - - return tool_calls - - def _extract_response_function_call(self, response_function_call) -> AssistantPromptMessage.ToolCall: - """ - Extract function call from response - - :param response_function_call: response function call - :return: tool call - """ - tool_call = None - if response_function_call: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function_call.get("name", ""), arguments=response_function_call.get("arguments", "") - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_function_call.get("id", ""), type="function", function=function - ) - - return tool_call diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.py b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.py deleted file mode 100644 index ca6f185287..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = 
logging.getLogger(__name__) - - -class OAICompatProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml b/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml deleted file mode 100644 index 88c76fe16e..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/openai_api_compatible.yaml +++ /dev/null @@ -1,162 +0,0 @@ -provider: openai_api_compatible -label: - en_US: OpenAI-API-compatible -description: - en_US: Model providers compatible with OpenAI's API standard, such as LM Studio. - zh_Hans: 兼容 OpenAI API 的模型供应商,例如 LM Studio 。 -supported_model_types: - - llm - - text-embedding - - speech2text -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter full model name - zh_Hans: 输入模型全称 - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: false - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: endpoint_url - label: - zh_Hans: API endpoint URL - en_US: API endpoint URL - type: text-input - required: true - placeholder: - zh_Hans: Base URL, e.g. https://api.openai.com/v1 - en_US: Base URL, e.g. https://api.openai.com/v1 - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion mode - type: select - required: false - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - show_on: - - variable: __model_type - value: llm - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - show_on: - - variable: __model_type - value: text-embedding - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens_to_sample - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - show_on: - - variable: __model_type - value: llm - default: '4096' - type: text-input - - variable: function_calling_type - show_on: - - variable: __model_type - value: llm - label: - en_US: Function calling - type: select - required: false - default: no_call - options: - - value: function_call - label: - en_US: Function Call - zh_Hans: Function Call - - value: tool_call - label: - en_US: Tool Call - zh_Hans: Tool Call - - value: no_call - label: - en_US: Not Support - zh_Hans: 不支持 - - variable: stream_function_calling - show_on: - - variable: __model_type - value: llm - label: - en_US: Stream function calling - type: select - required: false - default: not_supported - options: - - value: supported - label: - en_US: Support - zh_Hans: 支持 - - value: not_supported - label: - en_US: Not Support - zh_Hans: 不支持 - - variable: vision_support - show_on: - - variable: __model_type - value: llm - label: - zh_Hans: Vision 支持 - en_US: Vision Support - type: select - required: false - default: no_support - options: - - value: support - label: - en_US: Support - zh_Hans: 支持 - - value: no_support - label: - en_US: Not Support - 
zh_Hans: 不支持 - - variable: stream_mode_delimiter - label: - zh_Hans: 流模式返回结果的分隔符 - en_US: Delimiter for streaming results - show_on: - - variable: __model_type - value: llm - default: '\n\n' - type: text-input diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py b/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py deleted file mode 100644 index 405096578c..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/speech2text/speech2text.py +++ /dev/null @@ -1,61 +0,0 @@ -from typing import IO, Optional -from urllib.parse import urljoin - -import requests - -from core.model_runtime.errors.invoke import InvokeBadRequestError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel -from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat - - -class OAICompatSpeech2TextModel(_CommonOaiApiCompat, Speech2TextModel): - """ - Model class for OpenAI Compatible Speech to text model. - """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - headers = {} - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials.get("endpoint_url") - if not endpoint_url.endswith("/"): - endpoint_url += "/" - endpoint_url = urljoin(endpoint_url, "audio/transcriptions") - - payload = {"model": model} - files = [("file", file)] - response = requests.post(endpoint_url, headers=headers, data=payload, files=files) - - if response.status_code != 200: - raise InvokeBadRequestError(response.text) - response_data = response.json() - return response_data["text"] - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - audio_file_path = self._get_demo_file_path() - - with open(audio_file_path, "rb") as audio_file: - self._invoke(model, credentials, audio_file) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/__init__.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openllm/__init__.py b/api/core/model_runtime/model_providers/openllm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openllm/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/openllm/_assets/icon_l_en.svg deleted file mode 100644 index 59bb57992c..0000000000 --- a/api/core/model_runtime/model_providers/openllm/_assets/icon_l_en.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git 
a/api/core/model_runtime/model_providers/openllm/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/openllm/_assets/icon_s_en.svg deleted file mode 100644 index d25d627020..0000000000 --- a/api/core/model_runtime/model_providers/openllm/_assets/icon_s_en.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/openllm/llm/__init__.py b/api/core/model_runtime/model_providers/openllm/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openllm/llm/llm.py b/api/core/model_runtime/model_providers/openllm/llm/llm.py deleted file mode 100644 index 34b4de7962..0000000000 --- a/api/core/model_runtime/model_providers/openllm/llm/llm.py +++ /dev/null @@ -1,264 +0,0 @@ -from collections.abc import Generator - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.openllm.llm.openllm_generate import OpenLLMGenerate, OpenLLMGenerateMessage -from core.model_runtime.model_providers.openllm.llm.openllm_generate_errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - - -class OpenLLMLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate credentials for Baichuan model - """ - if not credentials.get("server_url"): - raise CredentialsValidateFailedError("Invalid server URL") - - # ping - instance = OpenLLMGenerate() - try: - instance.generate( - server_url=credentials["server_url"], - model_name=model, - prompt_messages=[OpenLLMGenerateMessage(content="ping\nAnswer: ", role="user")], - model_parameters={ - "max_tokens": 64, - "temperature": 0.8, - "top_p": 0.9, - "top_k": 15, - }, - stream=False, - user="", - stop=[], - ) - except InvalidAuthenticationError as e: - raise CredentialsValidateFailedError(f"Invalid API key: {e}") - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - return self._num_tokens_from_messages(prompt_messages, tools) - - def _num_tokens_from_messages(self, messages: list[PromptMessage], tools: list[PromptMessageTool]) -> int: - """ - 
Calculate num tokens for OpenLLM model - it's a generate model, so we just join the message contents with commas - """ - messages = ",".join([message.content for message in messages]) - return self._get_num_tokens_by_gpt2(messages) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - client = OpenLLMGenerate() - response = client.generate( - model_name=model, - server_url=credentials["server_url"], - prompt_messages=[self._convert_prompt_message_to_openllm_message(message) for message in prompt_messages], - model_parameters=model_parameters, - stop=stop, - stream=stream, - user=user, - ) - - if stream: - return self._handle_chat_generate_stream_response( - model=model, prompt_messages=prompt_messages, credentials=credentials, response=response - ) - return self._handle_chat_generate_response( - model=model, prompt_messages=prompt_messages, credentials=credentials, response=response - ) - - def _convert_prompt_message_to_openllm_message(self, prompt_message: PromptMessage) -> OpenLLMGenerateMessage: - """ - Convert PromptMessage to OpenLLMGenerateMessage so that we can use the OpenLLMGenerateMessage interface - """ - if isinstance(prompt_message, UserPromptMessage): - return OpenLLMGenerateMessage(role=OpenLLMGenerateMessage.Role.USER.value, content=prompt_message.content) - elif isinstance(prompt_message, AssistantPromptMessage): - return OpenLLMGenerateMessage( - role=OpenLLMGenerateMessage.Role.ASSISTANT.value, content=prompt_message.content - ) - else: - raise NotImplementedError(f"Prompt message type {type(prompt_message)} is not supported") - - def _handle_chat_generate_response( - self, model: str, prompt_messages: list[PromptMessage], credentials: dict, response: OpenLLMGenerateMessage - ) -> LLMResult: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=response.usage["prompt_tokens"], - completion_tokens=response.usage["completion_tokens"], - ) - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage( - content=response.content, - tool_calls=[], - ), - usage=usage, - ) - - def _handle_chat_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Generator[OpenLLMGenerateMessage, None, None], - ) -> Generator[LLMResultChunk, None, None]: - for message in response: - if message.usage: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=message.usage["prompt_tokens"], - completion_tokens=message.usage["completion_tokens"], - ) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content, tool_calls=[]), - usage=usage, - finish_reason=message.stop_reason or None, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content, tool_calls=[]), - finish_reason=message.stop_reason or None, - ), - ) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - Used to define the customizable model schema - """ - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature",
- label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="top_k", - type=ParameterType.INT, - use_template="top_k", - min=1, - default=1, - label=I18nObject(zh_Hans="Top K", en_US="Top K"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ] - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - model_properties={ - ModelPropertyKey.MODE: LLMMode.COMPLETION.value, - }, - parameter_rules=rules, - ) - - return entity - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } diff --git a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py b/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py deleted file mode 100644 index 351dcced15..0000000000 --- a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate.py +++ /dev/null @@ -1,198 +0,0 @@ -from collections.abc import Generator -from enum import Enum -from json import dumps, loads -from typing import Any, Union - -from requests import Response, post -from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema - -from core.model_runtime.model_providers.openllm.llm.openllm_generate_errors import ( - BadRequestError, - InternalServerError, - InvalidAuthenticationError, -) - - -class OpenLLMGenerateMessage: - class Role(Enum): - USER = "user" - ASSISTANT = "assistant" - - role: str = Role.USER.value - content: str - usage: dict[str, int] = None - stop_reason: str = "" - - def to_dict(self) -> dict[str, Any]: - return { - "role": self.role, - "content": self.content, - } - - def __init__(self, content: str, role: str = "user") -> None: - self.content = content - self.role = role - - -class OpenLLMGenerate: - def generate( - self, - server_url: str, - model_name: str, - stream: bool, - model_parameters: dict[str, Any], - stop: list[str], - prompt_messages: list[OpenLLMGenerateMessage], - user: str, - ) -> Union[Generator[OpenLLMGenerateMessage, None, None], OpenLLMGenerateMessage]: - if not server_url: - raise InvalidAuthenticationError("Invalid server URL") - - default_llm_config = { - "max_new_tokens": 128, - "min_length": 0, - "early_stopping": False, - "num_beams": 1, - "num_beam_groups": 1, - "use_cache": True, - "temperature": 0.75, - "top_k": 15, - "top_p": 0.9, - "typical_p": 1, - "epsilon_cutoff": 0, - "eta_cutoff": 0, - "diversity_penalty": 0, - "repetition_penalty": 1, - "encoder_repetition_penalty": 1, - "length_penalty": 1, - "no_repeat_ngram_size": 0, - "renormalize_logits": False, - "remove_invalid_values": False, - "num_return_sequences": 1, - 
"output_attentions": False, - "output_hidden_states": False, - "output_scores": False, - "encoder_no_repeat_ngram_size": 0, - "n": 1, - "presence_penalty": 0, - "frequency_penalty": 0, - "use_beam_search": False, - "ignore_eos": False, - "skip_special_tokens": True, - } - - if "max_tokens" in model_parameters and type(model_parameters["max_tokens"]) == int: - default_llm_config["max_new_tokens"] = model_parameters["max_tokens"] - - if "temperature" in model_parameters and type(model_parameters["temperature"]) == float: - default_llm_config["temperature"] = model_parameters["temperature"] - - if "top_p" in model_parameters and type(model_parameters["top_p"]) == float: - default_llm_config["top_p"] = model_parameters["top_p"] - - if "top_k" in model_parameters and type(model_parameters["top_k"]) == int: - default_llm_config["top_k"] = model_parameters["top_k"] - - if "use_cache" in model_parameters and type(model_parameters["use_cache"]) == bool: - default_llm_config["use_cache"] = model_parameters["use_cache"] - - headers = {"Content-Type": "application/json", "accept": "application/json"} - - if stream: - url = f"{server_url}/v1/generate_stream" - timeout = 10 - else: - url = f"{server_url}/v1/generate" - timeout = 120 - - data = { - "stop": stop or [], - "prompt": "\n".join([message.content for message in prompt_messages]), - "llm_config": default_llm_config, - } - - try: - response = post(url=url, data=dumps(data), timeout=timeout, stream=stream, headers=headers) - except (ConnectionError, InvalidSchema, MissingSchema) as e: - # cloud not connect to the server - raise InvalidAuthenticationError(f"Invalid server URL: {e}") - - if not response.ok: - resp = response.json() - msg = resp["msg"] - if response.status_code == 400: - raise BadRequestError(msg) - elif response.status_code == 404: - raise InvalidAuthenticationError(msg) - elif response.status_code == 500: - raise InternalServerError(msg) - else: - raise InternalServerError(msg) - - if stream: - return self._handle_chat_stream_generate_response(response) - return self._handle_chat_generate_response(response) - - def _handle_chat_generate_response(self, response: Response) -> OpenLLMGenerateMessage: - try: - data = response.json() - except Exception as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {response.text}") - - message = data["outputs"][0] - text = message["text"] - token_ids = message["token_ids"] - prompt_token_ids = data["prompt_token_ids"] - stop_reason = message["finish_reason"] - - message = OpenLLMGenerateMessage(content=text, role=OpenLLMGenerateMessage.Role.ASSISTANT.value) - message.stop_reason = stop_reason - message.usage = { - "prompt_tokens": len(prompt_token_ids), - "completion_tokens": len(token_ids), - "total_tokens": len(prompt_token_ids) + len(token_ids), - } - - return message - - def _handle_chat_stream_generate_response( - self, response: Response - ) -> Generator[OpenLLMGenerateMessage, None, None]: - completion_usage = 0 - - for line in response.iter_lines(): - if not line: - continue - - line: str = line.decode("utf-8") - if line.startswith("data: "): - line = line[6:].strip() - - if line == "[DONE]": - return - - try: - data = loads(line) - except Exception as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {line}") - - output = data["outputs"] - - for choice in output: - text = choice["text"] - token_ids = choice["token_ids"] - - completion_usage += len(token_ids) - message = OpenLLMGenerateMessage(content=text, 
role=OpenLLMGenerateMessage.Role.ASSISTANT.value) - - if choice.get("finish_reason"): - finish_reason = choice["finish_reason"] - prompt_token_ids = data["prompt_token_ids"] - message.stop_reason = finish_reason - message.usage = { - "prompt_tokens": len(prompt_token_ids), - "completion_tokens": completion_usage, - "total_tokens": completion_usage + len(prompt_token_ids), - } - - yield message diff --git a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate_errors.py b/api/core/model_runtime/model_providers/openllm/llm/openllm_generate_errors.py deleted file mode 100644 index 309b5cf413..0000000000 --- a/api/core/model_runtime/model_providers/openllm/llm/openllm_generate_errors.py +++ /dev/null @@ -1,22 +0,0 @@ -class InvalidAuthenticationError(Exception): - pass - - -class InvalidAPIKeyError(Exception): - pass - - -class RateLimitReachedError(Exception): - pass - - -class InsufficientAccountBalanceError(Exception): - pass - - -class InternalServerError(Exception): - pass - - -class BadRequestError(Exception): - pass diff --git a/api/core/model_runtime/model_providers/openllm/openllm.py b/api/core/model_runtime/model_providers/openllm/openllm.py deleted file mode 100644 index 8014802144..0000000000 --- a/api/core/model_runtime/model_providers/openllm/openllm.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class OpenLLMProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/openllm/openllm.yaml b/api/core/model_runtime/model_providers/openllm/openllm.yaml deleted file mode 100644 index fef52695e3..0000000000 --- a/api/core/model_runtime/model_providers/openllm/openllm.yaml +++ /dev/null @@ -1,37 +0,0 @@ -provider: openllm -label: - en_US: OpenLLM -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#F9FAFB" -help: - title: - en_US: How to deploy OpenLLM - zh_Hans: 如何部署 OpenLLM - url: - en_US: https://github.com/bentoml/OpenLLM -supported_model_types: - - llm - - text-embedding -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: server_url - label: - zh_Hans: 服务器URL - en_US: Server url - type: text-input - required: true - placeholder: - zh_Hans: 在此输入OpenLLM的服务器地址,如 http://192.168.1.100:3000 - en_US: Enter the url of your OpenLLM, e.g. 
http://192.168.1.100:3000 diff --git a/api/core/model_runtime/model_providers/openllm/text_embedding/__init__.py b/api/core/model_runtime/model_providers/openllm/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openrouter/__init__.py b/api/core/model_runtime/model_providers/openrouter/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openrouter/_assets/openrouter.svg b/api/core/model_runtime/model_providers/openrouter/_assets/openrouter.svg deleted file mode 100644 index 2e9590d923..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/_assets/openrouter.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/openrouter/_assets/openrouter_square.svg b/api/core/model_runtime/model_providers/openrouter/_assets/openrouter_square.svg deleted file mode 100644 index ed81fc041f..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/_assets/openrouter_square.svg +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/openrouter/llm/__init__.py b/api/core/model_runtime/model_providers/openrouter/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml b/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml deleted file mode 100644 index d9497b76b8..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/_position.yaml +++ /dev/null @@ -1,27 +0,0 @@ -- openai/o1-preview -- openai/o1-mini -- openai/gpt-4o -- openai/gpt-4o-mini -- openai/gpt-4 -- openai/gpt-4-32k -- openai/gpt-3.5-turbo -- anthropic/claude-3.5-sonnet -- anthropic/claude-3-haiku -- anthropic/claude-3-opus -- anthropic/claude-3-sonnet -- google/gemini-pro-1.5 -- google/gemini-flash-1.5 -- google/gemini-pro -- cohere/command-r-plus -- cohere/command-r -- meta-llama/llama-3.1-405b-instruct -- meta-llama/llama-3.1-70b-instruct -- meta-llama/llama-3.1-8b-instruct -- meta-llama/llama-3-70b-instruct -- meta-llama/llama-3-8b-instruct -- mistralai/mixtral-8x22b-instruct -- mistralai/mixtral-8x7b-instruct -- mistralai/mistral-7b-instruct -- qwen/qwen-2-72b-instruct -- deepseek/deepseek-chat -- deepseek/deepseek-coder diff --git a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-5-sonnet.yaml b/api/core/model_runtime/model_providers/openrouter/llm/claude-3-5-sonnet.yaml deleted file mode 100644 index 40558854e2..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-5-sonnet.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: anthropic/claude-3.5-sonnet -label: - en_US: claude-3.5-sonnet -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
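
Stepping back to the OpenLLM `_handle_chat_stream_generate_response` removed a little above: it reads a server-sent-events body where each event line is `data: <json>` and a literal `[DONE]` sentinel ends the stream. A minimal sketch of that protocol with plain `requests`; the URL and payload in the usage note are placeholders, not a guaranteed endpoint.

import json
import requests

def iter_sse_json(url: str, payload: dict):
    # Stream the response and decode each SSE event: strip the
    # "data: " prefix, stop at the "[DONE]" sentinel, parse the rest as JSON.
    with requests.post(url, json=payload, stream=True, timeout=10) as resp:
        resp.raise_for_status()
        for raw in resp.iter_lines():
            if not raw:
                continue
            line = raw.decode("utf-8")
            if line.startswith("data: "):
                line = line[6:].strip()
            if line == "[DONE]":
                return
            yield json.loads(line)

# Illustrative usage against an OpenLLM-style server:
# for event in iter_sse_json("http://localhost:3000/v1/generate_stream", {"prompt": "ping"}):
#     print(event["outputs"][0]["text"], end="")
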
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: "3.00" - output: "15.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-haiku.yaml b/api/core/model_runtime/model_providers/openrouter/llm/claude-3-haiku.yaml deleted file mode 100644 index ce17d4123e..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-haiku.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: anthropic/claude-3-haiku -label: - en_US: claude-3-haiku -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: "0.25" - output: "1.25" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-opus.yaml b/api/core/model_runtime/model_providers/openrouter/llm/claude-3-opus.yaml deleted file mode 100644 index 68a92219eb..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-opus.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: anthropic/claude-3-opus -label: - en_US: claude-3-opus -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: "15.00" - output: "75.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-sonnet.yaml b/api/core/model_runtime/model_providers/openrouter/llm/claude-3-sonnet.yaml deleted file mode 100644 index ede88459ca..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/claude-3-sonnet.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: anthropic/claude-3-sonnet -label: - en_US: claude-3-sonnet -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
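
All of the OpenRouter model cards deleted in this hunk share the same `parameter_rules` shape (a name plus optional `default`, `min`, and `max`). A sketch of how a consumer might normalize a user-supplied value against one such rule, assuming PyYAML; `clamp_to_rule` is illustrative, not a Dify API.

import yaml

RULE = yaml.safe_load("""
name: max_tokens
default: 4096
min: 1
max: 4096
""")

def clamp_to_rule(rule: dict, value: int | None) -> int:
    # Fall back to the rule's default, then clamp into [min, max].
    v = rule.get("default") if value is None else value
    return max(rule["min"], min(rule["max"], v))

print(clamp_to_rule(RULE, 9999))  # -> 4096
print(clamp_to_rule(RULE, None))  # -> 4096
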
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 4096 - min: 1 - max: 4096 - - name: response_format - use_template: response_format -pricing: - input: "3.00" - output: "15.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/command-r-plus.yaml b/api/core/model_runtime/model_providers/openrouter/llm/command-r-plus.yaml deleted file mode 100644 index a23eb269d1..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/command-r-plus.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: cohere/command-r-plus -label: - en_US: command-r-plus -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: top_p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: "3" - output: "15" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/command-r.yaml b/api/core/model_runtime/model_providers/openrouter/llm/command-r.yaml deleted file mode 100644 index 7165bf29b0..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/command-r.yaml +++ /dev/null @@ -1,45 +0,0 @@ -model: cohere/command-r -label: - en_US: command-r -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - max: 5.0 - - name: top_p - use_template: top_p - default: 0.75 - min: 0.01 - max: 0.99 - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - default: 0 - min: 0 - max: 500 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 1024 - max: 4096 -pricing: - input: "0.5" - output: "1.5" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/deepseek-chat.yaml b/api/core/model_runtime/model_providers/openrouter/llm/deepseek-chat.yaml deleted file mode 100644 index 7a1dea6950..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/deepseek-chat.yaml +++ /dev/null @@ -1,50 +0,0 @@ -model: deepseek/deepseek-chat -label: - en_US: deepseek-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 1 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. 
- - name: max_tokens - use_template: max_tokens - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 1 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. - - name: frequency_penalty - use_template: frequency_penalty - default: 0 - min: -2.0 - max: 2.0 - help: - zh_Hans: 介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。 - en_US: A number between -2.0 and 2.0. If the value is positive, new tokens are penalized based on their frequency of occurrence in existing text, reducing the likelihood that the model will repeat the same content. -pricing: - input: "0.14" - output: "0.28" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/deepseek-coder.yaml b/api/core/model_runtime/model_providers/openrouter/llm/deepseek-coder.yaml deleted file mode 100644 index c05f4769b8..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/deepseek-coder.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: deepseek/deepseek-coder -label: - en_US: deepseek-coder -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 4096 - default: 1024 -pricing: - input: "0.14" - output: "0.28" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-flash.yaml deleted file mode 100644 index 0b2f329b28..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-flash.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: google/gemini-flash-1.5 -label: - en_US: gemini-flash-1.5 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
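
The `pricing` blocks in these cards encode per-token prices scaled by `unit`; with `unit: "0.000001"`, an `input` of `"0.14"` means $0.14 per million prompt tokens. A small sketch of the cost arithmetic, using `Decimal` to avoid float drift on billing figures:

from decimal import Decimal

def request_cost(prompt_tokens: int, completion_tokens: int, pricing: dict) -> Decimal:
    # cost = tokens * per-token price, where per-token price = price * unit.
    unit = Decimal(pricing["unit"])
    return (prompt_tokens * Decimal(pricing["input"])
            + completion_tokens * Decimal(pricing["output"])) * unit

# deepseek-chat card above: $0.14 / $0.28 per million tokens.
pricing = {"input": "0.14", "output": "0.28", "unit": "0.000001", "currency": "USD"}
print(request_cost(1_000_000, 500_000, pricing))  # -> 0.280000 USD
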
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format -pricing: - input: "0.25" - output: "0.75" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-pro.yaml deleted file mode 100644 index 679ce9bdcd..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gemini-1.5-pro.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: google/gemini-pro-1.5 -label: - en_US: gemini-pro-1.5 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format -pricing: - input: "2.5" - output: "7.5" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gemini-pro.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gemini-pro.yaml deleted file mode 100644 index 9f5d96c5b8..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gemini-pro.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: google/gemini-pro -label: - en_US: gemini-pro -model_type: llm -features: - - agent-thought - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 30720 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: max_tokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 - - name: response_format - use_template: response_format -pricing: - input: "0.125" - output: "0.375" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml deleted file mode 100644 index 186c1cc663..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-3.5-turbo.yaml +++ /dev/null @@ -1,42 +0,0 @@ -model: openai/gpt-3.5-turbo -label: - en_US: gpt-3.5-turbo -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 16385 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "0.5" - output: "1.5" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml deleted file mode 100644 index 8c2989b300..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4-32k.yaml +++ /dev/null @@ -1,57 +0,0 @@ -model: openai/gpt-4-32k -label: - en_US: gpt-4-32k -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 32768 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: - 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: - If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
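
The `seed` and `response_format` rules described above surface standard OpenAI-compatible request fields. A sketch of where they land in the wire payload, assuming the OpenRouter base URL that the deleted `llm.py` below hard-codes (`https://openrouter.ai/api/v1`); the `{"type": "json_object"}` shape follows the OpenAI convention, and the API key is a placeholder.

import requests

payload = {
    "model": "openai/gpt-4",
    "messages": [{"role": "user", "content": "Reply with a JSON greeting."}],
    "seed": 42,  # best-effort deterministic sampling, per the help text above
    "response_format": {"type": "json_object"},
    "max_tokens": 512,
}
resp = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",
    headers={"Authorization": "Bearer <api_key>"},  # placeholder key
    json=payload,
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
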
- required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "60" - output: "120" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml deleted file mode 100644 index ef19d4f6f0..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4.yaml +++ /dev/null @@ -1,57 +0,0 @@ -model: openai/gpt-4 -label: - en_US: gpt-4 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: - 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: - If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. - required: false - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "30" - output: "60" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml deleted file mode 100644 index 0be325f55b..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-2024-08-06.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: gpt-4o-2024-08-06 -label: - zh_Hans: gpt-4o-2024-08-06 - en_US: gpt-4o-2024-08-06 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: '2.50' - output: '10.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml deleted file mode 100644 index 3b1d95643d..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o-mini.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: openai/gpt-4o-mini -label: - en_US: gpt-4o-mini -model_type: llm -features: - 
- multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 16384 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "0.15" - output: "0.60" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml deleted file mode 100644 index a8c97efdd6..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/gpt-4o.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: openai/gpt-4o -label: - en_US: gpt-4o -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call - - vision -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 4096 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: Response Format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "5.00" - output: "15.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llama-3-70b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/llama-3-70b-instruct.yaml deleted file mode 100644 index b91c39e729..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llama-3-70b-instruct.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta-llama/llama-3-70b-instruct -label: - en_US: llama-3-70b-instruct -model_type: llm -model_properties: - mode: completion - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: "0.59" - output: "0.79" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llama-3-8b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/llama-3-8b-instruct.yaml deleted file mode 100644 index 84b2c7fac2..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llama-3-8b-instruct.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta-llama/llama-3-8b-instruct -label: - en_US: llama-3-8b-instruct -model_type: llm -model_properties: - mode: completion - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 2048 -pricing: - input: "0.07" - output: "0.07" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-405b-instruct.yaml 
b/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-405b-instruct.yaml deleted file mode 100644 index a489ce1b5a..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-405b-instruct.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta-llama/llama-3.1-405b-instruct -label: - en_US: llama-3.1-405b-instruct -model_type: llm -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 131072 -pricing: - input: "2.7" - output: "2.7" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-70b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-70b-instruct.yaml deleted file mode 100644 index 12037411b1..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-70b-instruct.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta-llama/llama-3.1-70b-instruct -label: - en_US: llama-3.1-70b-instruct -model_type: llm -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 131072 -pricing: - input: "0.52" - output: "0.75" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-8b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-8b-instruct.yaml deleted file mode 100644 index 6f06493f29..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llama-3.1-8b-instruct.yaml +++ /dev/null @@ -1,23 +0,0 @@ -model: meta-llama/llama-3.1-8b-instruct -label: - en_US: llama-3.1-8b-instruct -model_type: llm -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 512 - min: 1 - max: 131072 -pricing: - input: "0.06" - output: "0.06" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/llm.py b/api/core/model_runtime/model_providers/openrouter/llm/llm.py deleted file mode 100644 index 736ab8e7a8..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/llm.py +++ /dev/null @@ -1,106 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class OpenRouterLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _update_credential(self, model: str, credentials: dict): - credentials["endpoint_url"] = "https://openrouter.ai/api/v1" - credentials["mode"] = self.get_model_mode(model).value - credentials["function_calling_type"] = "tool_call" - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: 
bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._update_credential(model, credentials) - - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._update_credential(model, credentials) - - return super().validate_credentials(model, credentials) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._update_credential(model, credentials) - - block_as_stream = False - if model.startswith("openai/o1"): - block_as_stream = True - stop = None - - # invoke block as stream - if stream and block_as_stream: - return self._generate_block_as_stream( - model, credentials, prompt_messages, model_parameters, tools, stop, user - ) - else: - return super()._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _generate_block_as_stream( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - user: Optional[str] = None, - ) -> Generator: - resp: LLMResult = super()._generate( - model, credentials, prompt_messages, model_parameters, tools, stop, False, user - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=resp.message, - usage=self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=resp.usage.prompt_tokens, - completion_tokens=resp.usage.completion_tokens, - ), - finish_reason="stop", - ), - ) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - self._update_credential(model, credentials) - - return super().get_customizable_model_schema(model, credentials) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - self._update_credential(model, credentials) - - return super().get_num_tokens(model, credentials, prompt_messages, tools) diff --git a/api/core/model_runtime/model_providers/openrouter/llm/mistral-7b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/mistral-7b-instruct.yaml deleted file mode 100644 index 012dfc55ce..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/mistral-7b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: mistralai/mistral-7b-instruct -label: - en_US: mistral-7b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 2048 -pricing: - input: "0.07" - output: "0.07" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x22b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x22b-instruct.yaml deleted file mode 100644 index f4eb4e45d9..0000000000 --- 
a/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x22b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: mistralai/mixtral-8x22b-instruct -label: - en_US: mixtral-8x22b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 64000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 -pricing: - input: "0.65" - output: "0.65" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x7b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x7b-instruct.yaml deleted file mode 100644 index 7871e1f7a0..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/mixtral-8x7b-instruct.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: mistralai/mixtral-8x7b-instruct -label: - zh_Hans: mixtral-8x7b-instruct - en_US: mixtral-8x7b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.7 - min: 0 - max: 1 - - name: top_p - use_template: top_p - default: 1 - min: 0 - max: 1 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8000 -pricing: - input: "0.24" - output: "0.24" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/o1-mini.yaml b/api/core/model_runtime/model_providers/openrouter/llm/o1-mini.yaml deleted file mode 100644 index 85a918ff5e..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/o1-mini.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: openai/o1-mini -label: - en_US: o1-mini -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 65536 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format that the model must output - required: false - options: - - text - - json_object -pricing: - input: "3.00" - output: "12.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/o1-preview.yaml b/api/core/model_runtime/model_providers/openrouter/llm/o1-preview.yaml deleted file mode 100644 index 74b0a511be..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/o1-preview.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: openai/o1-preview -label: - en_US: o1-preview -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 32768 - - name: response_format - label: - zh_Hans: 回复格式 - en_US: response_format - type: string - help: - zh_Hans: 指定模型必须输出的格式 - en_US: specifying the format 
that the model must output - required: false - options: - - text - - json_object -pricing: - input: "15.00" - output: "60.00" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/llm/qwen2-72b-instruct.yaml b/api/core/model_runtime/model_providers/openrouter/llm/qwen2-72b-instruct.yaml deleted file mode 100644 index 7b75fcb0c9..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/llm/qwen2-72b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: qwen/qwen-2-72b-instruct -label: - en_US: qwen-2-72b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: "0.59" - output: "0.79" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/openrouter/openrouter.py b/api/core/model_runtime/model_providers/openrouter/openrouter.py deleted file mode 100644 index 2e59ab5059..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/openrouter.py +++ /dev/null @@ -1,20 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class OpenRouterProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="openai/gpt-3.5-turbo", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/openrouter/openrouter.yaml b/api/core/model_runtime/model_providers/openrouter/openrouter.yaml deleted file mode 100644 index f7536609ec..0000000000 --- a/api/core/model_runtime/model_providers/openrouter/openrouter.yaml +++ /dev/null @@ -1,105 +0,0 @@ -provider: openrouter -label: - en_US: OpenRouter -icon_small: - en_US: openrouter_square.svg -icon_large: - en_US: openrouter.svg -background: "#F1EFED" -help: - title: - en_US: Get your API key from openrouter.ai - zh_Hans: 从 openrouter.ai 获取 API Key - url: - en_US: https://openrouter.ai/keys -supported_model_types: - - llm -configurate_methods: - - predefined-model - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter full model name - zh_Hans: 输入模型全称 - credential_form_schemas: - - variable: api_key - required: true - label: - en_US: API Key - type: secret-input - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion mode - type: select - required: false - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: 
Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - type: text-input - default: "4096" - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens_to_sample - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - show_on: - - variable: __model_type - value: llm - default: "4096" - type: text-input - - variable: vision_support - show_on: - - variable: __model_type - value: llm - label: - zh_Hans: 是否支持 Vision - en_US: Vision Support - type: radio - required: false - default: "no_support" - options: - - value: "support" - label: - en_US: "Yes" - zh_Hans: 是 - - value: "no_support" - label: - en_US: "No" - zh_Hans: 否 -provider_credential_schema: - credential_form_schemas: - - variable: api_key - required: true - label: - en_US: API Key - type: secret-input - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/perfxcloud/__init__.py b/api/core/model_runtime/model_providers/perfxcloud/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_l_en.svg deleted file mode 100644 index 060d9de3a9..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_l_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_s_en.svg deleted file mode 100644 index be0c2eeb1c..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml deleted file mode 100644 index bf91468fcf..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Llama3-Chinese_v2.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Llama3-Chinese_v2 -label: - en_US: Llama3-Chinese_v2 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. 
It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml deleted file mode 100644 index 781b837e8e..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-70B-Instruct-GPTQ-Int4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Meta-Llama-3-70B-Instruct-GPTQ-Int4 -label: - en_US: Meta-Llama-3-70B-Instruct-GPTQ-Int4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 1024 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml deleted file mode 100644 index 67210e9020..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3-8B-Instruct.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Meta-Llama-3-8B-Instruct -label: - en_US: Meta-Llama-3-8B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
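
The OpenRouterProvider removed earlier in this diff illustrates the common provider-level credential check: invoke a cheap, well-known model and let CredentialsValidateFailedError pass through while logging anything unexpected. Sketched generically below; the class name is hypothetical and the imports assume the pre-plugin module layout:

```python
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


class ExampleProvider(ModelProvider):
    def validate_provider_credentials(self, credentials: dict) -> None:
        try:
            # Probe with an inexpensive LLM rather than a dedicated
            # provider-level endpoint.
            model_instance = self.get_model_instance(ModelType.LLM)
            model_instance.validate_credentials(model="openai/gpt-3.5-turbo", credentials=credentials)
        except CredentialsValidateFailedError:
            raise  # invalid key: surface as-is
        except Exception:
            logger.exception("provider credentials validation failed")
            raise
```
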
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml deleted file mode 100644 index 482632ff06..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-405B-Instruct-AWQ-INT4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Meta-Llama-3.1-405B-Instruct-AWQ-INT4 -label: - en_US: Meta-Llama-3.1-405B-Instruct-AWQ-INT4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 410960 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-8B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-8B-Instruct.yaml deleted file mode 100644 index bbab46344c..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Meta-Llama-3.1-8B-Instruct.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Meta-Llama-3.1-8B-Instruct -label: - en_US: Meta-Llama-3.1-8B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.1 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen-14B-Chat-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen-14B-Chat-Int4.yaml deleted file mode 100644 index ec6d9bcc14..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen-14B-Chat-Int4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Qwen-14B-Chat-Int4 -label: - en_US: Qwen-14B-Chat-Int4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-110B-Chat-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-110B-Chat-GPTQ-Int4.yaml deleted file mode 100644 index b561a53039..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-110B-Chat-GPTQ-Int4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Qwen1.5-110B-Chat-GPTQ-Int4 -label: - en_US: Qwen1.5-110B-Chat-GPTQ-Int4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 128 - min: 1 - max: 256 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
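
Every one of these PerfXCloud files repeats the same parameter_rules shape: a rule name, an optional use_template, a type, a default, and min/max bounds. A hypothetical helper showing how such a rule could be applied to caller-supplied parameters; apply_rule is illustrative only, not the model runtime's actual validator:

```python
def apply_rule(rule: dict, params: dict) -> dict:
    # Fill in the default when the caller omitted the parameter, then clamp
    # the value into the rule's [min, max] range.
    name = rule["name"]
    value = params.get(name, rule.get("default"))
    if value is None:
        return params
    if "min" in rule:
        value = max(rule["min"], value)
    if "max" in rule:
        value = min(rule["max"], value)
    return {**params, name: value}


max_tokens_rule = {"name": "max_tokens", "default": 600, "min": 1, "max": 1248}
print(apply_rule(max_tokens_rule, {}))                    # {'max_tokens': 600}
print(apply_rule(max_tokens_rule, {"max_tokens": 5000}))  # {'max_tokens': 1248}
```
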
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml deleted file mode 100644 index ddb6fd977c..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-72B-Chat-GPTQ-Int4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Qwen1.5-72B-Chat-GPTQ-Int4 -label: - en_US: Qwen1.5-72B-Chat-GPTQ-Int4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml deleted file mode 100644 index 024c79dbcf..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen1.5-7B.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: Qwen1.5-7B -label: - en_US: Qwen1.5-7B -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml deleted file mode 100644 index 94f661f40d..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-AWQ-int4.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Qwen2-72B-Instruct-AWQ-int4 -label: - en_US: Qwen2-72B-Instruct-AWQ-int4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml deleted file mode 100644 index a06f8d5ab1..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct-GPTQ-Int4.yaml +++ /dev/null @@ -1,64 +0,0 @@ -model: Qwen2-72B-Instruct-GPTQ-Int4 -label: - en_US: Qwen2-72B-Instruct-GPTQ-Int4 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.7 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
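
The top_p help text repeated above describes nucleus sampling: keep the smallest set of highest-probability tokens whose probabilities sum to at least top_p, then sample from that set. A framework-free sketch of just the selection step, using toy probabilities for illustration:

```python
def nucleus_filter(probs: dict[str, float], top_p: float) -> dict[str, float]:
    # Walk tokens from most to least likely until the cumulative probability
    # reaches top_p, then renormalize the survivors.
    kept: dict[str, float] = {}
    total = 0.0
    for token, p in sorted(probs.items(), key=lambda kv: kv[1], reverse=True):
        kept[token] = p
        total += p
        if total >= top_p:
            break
    norm = sum(kept.values())
    return {token: p / norm for token, p in kept.items()}


print(nucleus_filter({"a": 0.5, "b": 0.3, "c": 0.2}, top_p=0.8))
# {'a': 0.625, 'b': 0.375} -- "c" falls outside the nucleus
```
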
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct.yaml deleted file mode 100644 index cea6560295..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-72B-Instruct.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Qwen2-72B-Instruct -label: - en_US: Qwen2-72B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml deleted file mode 100644 index 4369411399..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B-Instruct.yaml +++ /dev/null @@ -1,63 +0,0 @@ -model: Qwen2-7B-Instruct -label: - en_US: Qwen2-7B-Instruct -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: completion - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml deleted file mode 100644 index d549ecd227..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2-7B.yaml +++ /dev/null @@ -1,64 +0,0 @@ -model: Qwen2-7B -label: - en_US: Qwen2-7B -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: completion - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml deleted file mode 100644 index 15cbf01f1f..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-72B-Instruct.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Qwen2.5-72B-Instruct -label: - en_US: Qwen2.5-72B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 30720 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
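
The pricing blocks follow a price-times-unit convention: the quoted input/output price multiplied by unit gives the cost per token, so the removed OpenRouter llama-3.1-405b-instruct entry (input "2.7", unit "0.000001") reads as 2.7 USD per million input tokens, while these PerfXCloud entries are all zero-priced in RMB. Assuming that interpretation, the arithmetic is:

```python
from decimal import Decimal


def token_cost(tokens: int, price: str, unit: str) -> Decimal:
    # price is quoted per (1 / unit) tokens; unit "0.000001" therefore means
    # the price applies per 1,000,000 tokens.
    return Decimal(tokens) * Decimal(price) * Decimal(unit)


print(token_cost(1_000_000, "2.7", "0.000001"))  # 2.700000 USD for 1M input tokens
print(token_cost(1_000, "0.000", "0.000"))       # 0.000000 -- these models are free
```
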
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml deleted file mode 100644 index dadc8f8f32..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Qwen2.5-7B-Instruct.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Qwen2.5-7B-Instruct -label: - en_US: Qwen2.5-7B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml deleted file mode 100644 index 649be20b48..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Reflection-Llama-3.1-70B.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Reflection-Llama-3.1-70B -label: - en_US: Reflection-Llama-3.1-70B -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 10240 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml deleted file mode 100644 index 92eae6804f..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-1_5-9B-Chat-16K.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Yi-1_5-9B-Chat-16K -label: - en_US: Yi-1_5-9B-Chat-16K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml deleted file mode 100644 index 0e21ce148c..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-1.5B-Chat.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Yi-Coder-1.5B-Chat -label: - en_US: Yi-Coder-1.5B-Chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 20480 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml deleted file mode 100644 index 23b0841ce4..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/Yi-Coder-9B-Chat.yaml +++ /dev/null @@ -1,61 +0,0 @@ -model: Yi-Coder-9B-Chat -label: - en_US: Yi-Coder-9B-Chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 20480 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/__init__.py b/api/core/model_runtime/model_providers/perfxcloud/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml deleted file mode 100644 index 37bf400f1e..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/_position.yaml +++ /dev/null @@ -1,24 +0,0 @@ -- Qwen2.5-72B-Instruct -- Qwen2.5-7B-Instruct -- Yi-Coder-1.5B-Chat -- Yi-Coder-9B-Chat -- Qwen2-72B-Instruct-AWQ-int4 -- Yi-1_5-9B-Chat-16K -- Qwen2-7B-Instruct -- Reflection-Llama-3.1-70B -- Qwen2-72B-Instruct -- Meta-Llama-3.1-8B-Instruct - -- Meta-Llama-3.1-405B-Instruct-AWQ-INT4 -- Meta-Llama-3-70B-Instruct-GPTQ-Int4 -- chatglm3-6b -- Meta-Llama-3-8B-Instruct -- Llama3-Chinese_v2 -- deepseek-v2-lite-chat -- Qwen2-72B-Instruct-GPTQ-Int4 -- Qwen2-7B -- Qwen-14B-Chat-Int4 -- Qwen1.5-72B-Chat-GPTQ-Int4 -- Qwen1.5-7B -- Qwen1.5-110B-Chat-GPTQ-Int4 -- deepseek-v2-chat diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml deleted file mode 100644 index 75d80f784a..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/chatglm3-6b.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: chatglm3-6b -label: - en_US: chatglm3-6b -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. 
Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml deleted file mode 100644 index fa9a7b7175..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: deepseek-v2-chat -label: - en_US: deepseek-v2-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. -pricing: - input: "0.000" - output: "0.000" - unit: "0.000" - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml b/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml deleted file mode 100644 index 75a26d2505..0000000000 --- a/api/core/model_runtime/model_providers/perfxcloud/llm/deepseek-v2-lite-chat.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: deepseek-v2-lite-chat -label: - en_US: deepseek-v2-lite-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.5 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 600 - min: 1 - max: 1248 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
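Taken together, the temperature, top_p, top_k, and repetition_penalty rules deleted above describe standard decoding-time sampling controls. As a rough illustration of how such parameters are conventionally combined at sampling time (a minimal sketch, not PerfXCloud's server-side implementation; repetition_penalty, which downweights logits of already-generated tokens, is omitted):

```python
import numpy as np


def sample_next_token(logits: np.ndarray, temperature: float = 0.5,
                      top_k: int = 50, top_p: float = 0.8) -> int:
    """One decoding step combining temperature, top-k, and top-p (nucleus) sampling."""
    # Temperature < 1 sharpens the distribution; > 1 flattens it.
    scaled = logits / max(temperature, 1e-5)
    probs = np.exp(scaled - scaled.max())
    probs /= probs.sum()

    # Top-k: keep only the k highest-probability tokens.
    if top_k > 0:
        kth_largest = np.sort(probs)[-min(top_k, probs.size)]
        probs = np.where(probs < kth_largest, 0.0, probs)

    # Top-p: keep the smallest set of tokens whose cumulative probability >= top_p.
    order = np.argsort(probs)[::-1]
    cumulative = np.cumsum(probs[order])
    beyond_nucleus = order[cumulative > top_p]
    probs[beyond_nucleus[1:]] = 0.0  # keep the token that crosses the threshold

    probs /= probs.sum()
    return int(np.random.choice(probs.size, p=probs))
```

In the deleted configurations these controls are exposed per model, but the defaults are uniform: temperature 0.5 for the chat models (0.3 for Qwen2-7B), top_p 0.8, top_k capped at 99, and repetition_penalty 1.1.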
diff --git a/api/core/model_runtime/model_providers/perfxcloud/llm/llm.py b/api/core/model_runtime/model_providers/perfxcloud/llm/llm.py
deleted file mode 100644
index 89cac665aa..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/llm/llm.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from collections.abc import Generator
-from typing import Optional, Union
-from urllib.parse import urlparse
-
-import tiktoken
-
-from core.model_runtime.entities.llm_entities import LLMResult
-from core.model_runtime.entities.message_entities import (
-    PromptMessage,
-    PromptMessageTool,
-)
-from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel
-
-
-class PerfXCloudLargeLanguageModel(OpenAILargeLanguageModel):
-    def _invoke(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        model_parameters: dict,
-        tools: Optional[list[PromptMessageTool]] = None,
-        stop: Optional[list[str]] = None,
-        stream: bool = True,
-        user: Optional[str] = None,
-    ) -> Union[LLMResult, Generator]:
-        self._add_custom_parameters(credentials)
-
-        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        self._add_custom_parameters(credentials)
-        super().validate_credentials(model, credentials)
-
-    # Refactored from the OpenAI model runtime; uses cl100k_base to count tokens.
-    def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int:
-        """
-        Calculate the number of tokens for a text completion model with the tiktoken package.
-
-        :param model: model name
-        :param text: prompt text
-        :param tools: tools for tool calling
-        :return: number of tokens
-        """
-        encoding = tiktoken.get_encoding("cl100k_base")
-        num_tokens = len(encoding.encode(text))
-
-        if tools:
-            num_tokens += self._num_tokens_for_tools(encoding, tools)
-
-        return num_tokens
-
-    # Refactored from the OpenAI model runtime; uses cl100k_base to count tokens.
-    def _num_tokens_from_messages(
-        self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None
-    ) -> int:
-        """Calculate the number of tokens for gpt-3.5-turbo and gpt-4 with the tiktoken package.
-
-        Official documentation:
-        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
-        encoding = tiktoken.get_encoding("cl100k_base")
-        tokens_per_message = 3
-        tokens_per_name = 1
-
-        num_tokens = 0
-        messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages]
-        for message in messages_dict:
-            num_tokens += tokens_per_message
-            for key, value in message.items():
-                # Cast str(value) in case the message value is not a string.
-                # This occurs with function messages.
-                # TODO: Token calculation for the image type is not implemented; it would
-                # require downloading the image to obtain its resolution, which adds request delay.
-                if isinstance(value, list):
-                    text = ""
-                    for item in value:
-                        if isinstance(item, dict) and item["type"] == "text":
-                            text += item["text"]
-
-                    value = text
-
-                if key == "tool_calls":
-                    for tool_call in value:
-                        for t_key, t_value in tool_call.items():
-                            num_tokens += len(encoding.encode(t_key))
-                            if t_key == "function":
-                                for f_key, f_value in t_value.items():
-                                    num_tokens += len(encoding.encode(f_key))
-                                    num_tokens += len(encoding.encode(f_value))
-                            else:
-                                num_tokens += len(encoding.encode(t_key))
-                                num_tokens += len(encoding.encode(t_value))
-                else:
-                    num_tokens += len(encoding.encode(str(value)))
-
-                if key == "name":
-                    num_tokens += tokens_per_name
-
-        # every reply is primed with assistant
-        num_tokens += 3
-
-        if tools:
-            num_tokens += self._num_tokens_for_tools(encoding, tools)
-
-        return num_tokens
-
-    @staticmethod
-    def _add_custom_parameters(credentials: dict) -> None:
-        credentials["mode"] = "chat"
-        credentials["openai_api_key"] = credentials["api_key"]
-        if "endpoint_url" not in credentials or credentials["endpoint_url"] == "":
-            credentials["openai_api_base"] = "https://cloud.perfxlab.cn"
-        else:
-            parsed_url = urlparse(credentials["endpoint_url"])
-            credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}"
diff --git a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py b/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py
deleted file mode 100644
index 9a4ead031d..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import logging
-
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class PerfXCloudProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        pass
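The deleted PerfXCloud runtime deliberately counts tokens locally with tiktoken's cl100k_base encoding rather than calling the provider. A minimal sketch of that counting scheme, simplified from the deleted _num_tokens_from_messages (per-name overhead and tool-call fields are omitted here):

```python
import tiktoken


def count_chat_tokens(messages: list[dict[str, str]]) -> int:
    """Approximate the token count of a chat exchange with cl100k_base."""
    encoding = tiktoken.get_encoding("cl100k_base")
    tokens_per_message = 3  # per-message framing overhead, per the OpenAI cookbook
    num_tokens = sum(
        tokens_per_message + sum(len(encoding.encode(str(v))) for v in m.values())
        for m in messages
    )
    return num_tokens + 3  # every reply is primed with an assistant header


print(count_chat_tokens([{"role": "user", "content": "Hello, PerfXCloud!"}]))
```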
diff --git a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.yaml b/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.yaml
deleted file mode 100644
index 10ee691ebd..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/perfxcloud.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-provider: perfxcloud
-label:
-  en_US: PerfXCloud
-  zh_Hans: PerfXCloud
-description:
-  en_US: PerfXCloud (Pengfeng Technology) is an AI development and deployment platform tailored for developers and enterprises, providing inference capabilities for a wide range of models.
-  zh_Hans: PerfXCloud(澎峰科技)为开发者和企业量身打造的AI开发和部署平台,提供多种模型的推理能力。
-icon_small:
-  en_US: icon_s_en.svg
-icon_large:
-  en_US: icon_l_en.svg
-background: "#e3f0ff"
-help:
-  title:
-    en_US: Get your API Key from PerfXCloud
-    zh_Hans: 从 PerfXCloud 获取 API Key
-  url:
-    en_US: https://cloud.perfxlab.cn/panel/token
-supported_model_types:
-  - llm
-  - text-embedding
-configurate_methods:
-  - predefined-model
-provider_credential_schema:
-  credential_form_schemas:
-    - variable: api_key
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 API Key
-        en_US: Enter your API Key
-    - variable: endpoint_url
-      label:
-        zh_Hans: 自定义 API endpoint 地址
-        en_US: Custom API endpoint URL
-      type: text-input
-      required: false
-      placeholder:
-        zh_Hans: Base URL, e.g. https://cloud.perfxlab.cn/v1
-        en_US: Base URL, e.g. https://cloud.perfxlab.cn/v1
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-en-v1.5.yaml b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-en-v1.5.yaml
deleted file mode 100644
index 5756fb3d14..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-en-v1.5.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-model: BAAI/bge-large-en-v1.5
-model_type: text-embedding
-model_properties:
-  context_size: 32768
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-zh-v1.5.yaml b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-zh-v1.5.yaml
deleted file mode 100644
index 4204ab2860..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-large-zh-v1.5.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-model: BAAI/bge-large-zh-v1.5
-model_type: text-embedding
-model_properties:
-  context_size: 32768
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-m3.yaml b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-m3.yaml
deleted file mode 100644
index 55488e5688..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/BAAI-bge-m3.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-model: BAAI/bge-m3
-model_type: text-embedding
-model_properties:
-  context_size: 32768
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/__init__.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml
deleted file mode 100644
index 03db0d8bce..0000000000
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/gte-Qwen2-7B-instruct.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-model: gte-Qwen2-7B-instruct
-model_type: text-embedding
-model_properties:
-  context_size: 2048
diff --git a/api/core/model_runtime/model_providers/replicate/__init__.py b/api/core/model_runtime/model_providers/replicate/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/replicate/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/replicate/_assets/icon_l_en.svg
deleted file mode 100644
index 63c09470d5..0000000000
--- a/api/core/model_runtime/model_providers/replicate/_assets/icon_l_en.svg
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
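The optional endpoint_url credential in the PerfXCloud schema above is reduced to its scheme and host before being handed to the OpenAI-compatible client, as the deleted _add_custom_parameters shows. The same normalization in isolation:

```python
from urllib.parse import urlparse


def normalize_endpoint(endpoint_url: str | None) -> str:
    """Reduce a user-supplied endpoint to scheme://host, falling back to the hosted URL."""
    if not endpoint_url:
        return "https://cloud.perfxlab.cn"
    parsed = urlparse(endpoint_url)
    return f"{parsed.scheme}://{parsed.netloc}"


assert normalize_endpoint("https://cloud.perfxlab.cn/v1") == "https://cloud.perfxlab.cn"
assert normalize_endpoint(None) == "https://cloud.perfxlab.cn"
```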
diff --git a/api/core/model_runtime/model_providers/replicate/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/replicate/_assets/icon_s_en.svg
deleted file mode 100644
index 527316edb6..0000000000
--- a/api/core/model_runtime/model_providers/replicate/_assets/icon_s_en.svg
+++ /dev/null
@@ -1,4 +0,0 @@
-
-
-
-
diff --git a/api/core/model_runtime/model_providers/replicate/_common.py b/api/core/model_runtime/model_providers/replicate/_common.py
deleted file mode 100644
index 915f6e0eef..0000000000
--- a/api/core/model_runtime/model_providers/replicate/_common.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from replicate.exceptions import ModelError, ReplicateError
-
-from core.model_runtime.errors.invoke import InvokeBadRequestError, InvokeError
-
-
-class _CommonReplicate:
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        return {InvokeBadRequestError: [ReplicateError, ModelError]}
diff --git a/api/core/model_runtime/model_providers/replicate/llm/__init__.py b/api/core/model_runtime/model_providers/replicate/llm/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/replicate/llm/llm.py b/api/core/model_runtime/model_providers/replicate/llm/llm.py
deleted file mode 100644
index 3641b35dc0..0000000000
--- a/api/core/model_runtime/model_providers/replicate/llm/llm.py
+++ /dev/null
@@ -1,305 +0,0 @@
-from collections.abc import Generator
-from typing import Optional, Union
-
-from replicate import Client as ReplicateClient
-from replicate.exceptions import ReplicateError
-from replicate.prediction import Prediction
-
-from core.model_runtime.entities.common_entities import I18nObject
-from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    PromptMessage,
-    PromptMessageRole,
-    PromptMessageTool,
-    SystemPromptMessage,
-    UserPromptMessage,
-)
-from core.model_runtime.entities.model_entities import (
-    AIModelEntity,
-    FetchFrom,
-    ModelPropertyKey,
-    ModelType,
-    ParameterRule,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
-from core.model_runtime.model_providers.replicate._common import _CommonReplicate
-
-
-class ReplicateLargeLanguageModel(_CommonReplicate, LargeLanguageModel):
-    def _invoke(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        model_parameters: dict,
-        tools: Optional[list[PromptMessageTool]] = None,
-        stop: Optional[list[str]] = None,
-        stream: bool = True,
-        user: Optional[str] = None,
-    ) -> Union[LLMResult, Generator]:
-        model_version = ""
-        if "model_version" in credentials:
-            model_version = credentials["model_version"]
-
-        client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30)
-        model_info = client.models.get(model)
-
-        if model_version:
-            model_info_version = model_info.versions.get(model_version)
-        else:
-            model_info_version = model_info.latest_version
-
-        inputs = {**model_parameters}
-
-        if prompt_messages[0].role == PromptMessageRole.SYSTEM:
-            if "system_prompt" in model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"]:
-                inputs["system_prompt"] = prompt_messages[0].content
-            inputs["prompt"] = prompt_messages[1].content
-        else:
-            inputs["prompt"] = prompt_messages[0].content
-
-        prediction = client.predictions.create(version=model_info_version, input=inputs)
-
-        if stream:
-            return self._handle_generate_stream_response(model, credentials, prediction, stop, prompt_messages)
-        return self._handle_generate_response(model, credentials, prediction, stop, prompt_messages)
-
-    def get_num_tokens(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        tools: Optional[list[PromptMessageTool]] = None,
-    ) -> int:
-        prompt = self._convert_messages_to_prompt(prompt_messages)
-        return self._get_num_tokens_by_gpt2(prompt)
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        if "replicate_api_token" not in credentials:
-            raise CredentialsValidateFailedError("Replicate Access Token must be provided.")
-
-        model_version = ""
-        if "model_version" in credentials:
-            model_version = credentials["model_version"]
-
-        if model.count("/") != 1:
-            raise CredentialsValidateFailedError(
-                "Replicate Model Name must be provided, format: {user_name}/{model_name}"
-            )
-
-        try:
-            client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30)
-            model_info = client.models.get(model)
-
-            if model_version:
-                model_info_version = model_info.versions.get(model_version)
-            else:
-                model_info_version = model_info.latest_version
-
-            self._check_text_generation_model(model_info_version, model, model_version, model_info.description)
-        except ReplicateError as e:
-            raise CredentialsValidateFailedError(
-                f"Model {model}:{model_version} does not exist, cause: {e.__class__.__name__}:{str(e)}"
-            )
-        except Exception as e:
-            raise CredentialsValidateFailedError(str(e))
-
-    @staticmethod
-    def _check_text_generation_model(model_info_version, model_name, version, description):
-        if "language model" in description.lower():
-            return
-
-        if (
-            "temperature" not in model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"]
-            or "top_p" not in model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"]
-            or "top_k" not in model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"]
-        ):
-            raise CredentialsValidateFailedError(f"Model {model_name}:{version} is not a Text Generation model.")
-
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]:
-        model_mode = LLMMode.CHAT if model.endswith("-chat") else LLMMode.COMPLETION
-
-        entity = AIModelEntity(
-            model=model,
-            label=I18nObject(en_US=model),
-            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_type=ModelType.LLM,
-            model_properties={ModelPropertyKey.MODE: model_mode.value},
-            parameter_rules=self._get_customizable_model_parameter_rules(model, credentials),
-        )
-
-        return entity
-
-    @classmethod
-    def _get_customizable_model_parameter_rules(cls, model: str, credentials: dict) -> list[ParameterRule]:
-        model_version = ""
-        if "model_version" in credentials:
-            model_version = credentials["model_version"]
-
-        client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30)
-        model_info = client.models.get(model)
-
-        if model_version:
-            model_info_version = model_info.versions.get(model_version)
-        else:
-            model_info_version = model_info.latest_version
-
-        parameter_rules = []
-
-        input_properties = sorted(
-            model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"].items(),
-            key=lambda item: item[1].get("x-order", 0),
-        )
-
-        for key, value in input_properties:
-            if key not in {"system_prompt", "prompt"} and "stop" not in key:
-                value_type = value.get("type")
-
-                if not value_type:
-                    continue
-
-                param_type = cls._get_parameter_type(value_type)
-
-                rule = ParameterRule(
-                    name=key,
-                    label={"en_US": value["title"]},
-                    type=param_type,
-                    help={
-                        "en_US": value.get("description"),
-                    },
-                    required=False,
-                    default=value.get("default"),
-                    min=value.get("minimum"),
-                    max=value.get("maximum"),
-                )
-                parameter_rules.append(rule)
-
-        return parameter_rules
-
-    def _handle_generate_stream_response(
-        self,
-        model: str,
-        credentials: dict,
-        prediction: Prediction,
-        stop: list[str],
-        prompt_messages: list[PromptMessage],
-    ) -> Generator:
-        index = -1
-        current_completion: str = ""
-        stop_condition_reached = False
-
-        prediction_output_length = 10000
-        is_prediction_output_finished = False
-
-        for output in prediction.output_iterator():
-            current_completion += output
-
-            if not is_prediction_output_finished and prediction.status == "succeeded":
-                prediction_output_length = len(prediction.output) - 1
-                is_prediction_output_finished = True
-
-            if stop:
-                for s in stop:
-                    if s in current_completion:
-                        prediction.cancel()
-                        stop_index = current_completion.find(s)
-                        current_completion = current_completion[:stop_index]
-                        stop_condition_reached = True
-                        break
-
-            if stop_condition_reached:
-                break
-
-            index += 1
-
-            assistant_prompt_message = AssistantPromptMessage(content=output or "")
-
-            if index < prediction_output_length:
-                yield LLMResultChunk(
-                    model=model,
-                    prompt_messages=prompt_messages,
-                    delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message),
-                )
-            else:
-                prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-                completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
-
-                usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
-
-                yield LLMResultChunk(
-                    model=model,
-                    prompt_messages=prompt_messages,
-                    delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message, usage=usage),
-                )
-
-    def _handle_generate_response(
-        self,
-        model: str,
-        credentials: dict,
-        prediction: Prediction,
-        stop: list[str],
-        prompt_messages: list[PromptMessage],
-    ) -> LLMResult:
-        current_completion: str = ""
-        stop_condition_reached = False
-        for output in prediction.output_iterator():
-            current_completion += output
-
-            if stop:
-                for s in stop:
-                    if s in current_completion:
-                        prediction.cancel()
-                        stop_index = current_completion.find(s)
-                        current_completion = current_completion[:stop_index]
-                        stop_condition_reached = True
-                        break
-
-            if stop_condition_reached:
-                break
-
-        assistant_prompt_message = AssistantPromptMessage(content=current_completion)
-
-        prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-        completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
-
-        usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
-
-        result = LLMResult(
-            model=model,
-            prompt_messages=prompt_messages,
-            message=assistant_prompt_message,
-            usage=usage,
-        )
-
-        return result
-
-    @classmethod
-    def _get_parameter_type(cls, param_type: str) -> str:
-        type_mapping = {"integer": "int", "number": "float", "boolean": "boolean", "string": "string"}
-        return type_mapping.get(param_type)
-
-    def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str:
-        messages = messages.copy()  # don't mutate the original list
-
-        text = "".join(self._convert_one_message_to_text(message) for message in messages)
-
-        return text.rstrip()
-
-    @staticmethod
-    def _convert_one_message_to_text(message: PromptMessage) -> str:
-        human_prompt = "\n\nHuman:"
-        ai_prompt = "\n\nAssistant:"
-        content = message.content
-
-        if isinstance(message, UserPromptMessage):
-            message_text = f"{human_prompt} {content}"
-        elif isinstance(message, AssistantPromptMessage):
-            message_text = f"{ai_prompt} {content}"
-        elif isinstance(message, SystemPromptMessage):
-            message_text = content
-        else:
-            raise ValueError(f"Got unknown type {message}")
-
-        return message_text
diff --git a/api/core/model_runtime/model_providers/replicate/replicate.py b/api/core/model_runtime/model_providers/replicate/replicate.py
deleted file mode 100644
index ca137579c9..0000000000
--- a/api/core/model_runtime/model_providers/replicate/replicate.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import logging
-
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class ReplicateProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        pass
diff --git a/api/core/model_runtime/model_providers/replicate/replicate.yaml b/api/core/model_runtime/model_providers/replicate/replicate.yaml
deleted file mode 100644
index 9cad6d4f0d..0000000000
--- a/api/core/model_runtime/model_providers/replicate/replicate.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-provider: replicate
-label:
-  en_US: Replicate
-icon_small:
-  en_US: icon_s_en.svg
-icon_large:
-  en_US: icon_l_en.svg
-background: "#E5E7EB"
-help:
-  title:
-    en_US: Get your API Key from Replicate
-    zh_Hans: 从 Replicate 获取 API Key
-  url:
-    en_US: https://replicate.com/account/api-tokens
-supported_model_types:
-  - llm
-  - text-embedding
-configurate_methods:
-  - customizable-model
-model_credential_schema:
-  model:
-    label:
-      en_US: Model Name
-      zh_Hans: 模型名称
-  credential_form_schemas:
-    - variable: replicate_api_token
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 Replicate API Key
-        en_US: Enter your Replicate API Key
-    - variable: model_version
-      label:
-        en_US: Model Version
-      type: text-input
-      required: false
-      placeholder:
-        zh_Hans: 在此输入您的模型版本,默认为最新版本
-        en_US: Enter your model version; defaults to the latest version
diff --git a/api/core/model_runtime/model_providers/replicate/text_embedding/__init__.py b/api/core/model_runtime/model_providers/replicate/text_embedding/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/sagemaker/__init__.py b/api/core/model_runtime/model_providers/sagemaker/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/sagemaker/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/sagemaker/_assets/icon_l_en.png
deleted file mode 100644
index 0abe07a78f..0000000000
Binary files a/api/core/model_runtime/model_providers/sagemaker/_assets/icon_l_en.png and /dev/null differ
diff --git a/api/core/model_runtime/model_providers/sagemaker/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/sagemaker/_assets/icon_s_en.png
deleted file mode 100644
index 6b88942a5c..0000000000
Binary files a/api/core/model_runtime/model_providers/sagemaker/_assets/icon_s_en.png and /dev/null differ
diff --git a/api/core/model_runtime/model_providers/sagemaker/llm/__init__.py b/api/core/model_runtime/model_providers/sagemaker/llm/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
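The deleted Replicate handlers above accumulate streamed output and truncate at the first stop string before cancelling the remote prediction. The core of that truncation logic, extracted as a standalone sketch (the chunk values below are illustrative):

```python
from collections.abc import Iterable


def collect_until_stop(chunks: Iterable[str], stop: list[str]) -> str:
    """Accumulate streamed chunks, cutting the completion at the first stop string."""
    completion = ""
    for chunk in chunks:
        completion += chunk
        for s in stop:
            index = completion.find(s)
            if index != -1:
                # The deleted handler also calls prediction.cancel() at this point.
                return completion[:index]
    return completion


assert collect_until_stop(["Hel", "lo\nHum", "an: hi"], stop=["\nHuman:"]) == "Hello"
```

Because a stop string can span chunk boundaries, the check runs against the accumulated text rather than against each chunk in isolation, which is why the deleted handlers maintain a current_completion buffer.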
diff --git a/api/core/model_runtime/model_providers/sagemaker/rerank/__init__.py b/api/core/model_runtime/model_providers/sagemaker/rerank/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py b/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
deleted file mode 100644
index 959dff6a21..0000000000
--- a/api/core/model_runtime/model_providers/sagemaker/rerank/rerank.py
+++ /dev/null
@@ -1,173 +0,0 @@
-import json
-import logging
-import operator
-from typing import Any, Optional
-
-import boto3
-
-from core.model_runtime.entities.common_entities import I18nObject
-from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType
-from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeBadRequestError,
-    InvokeConnectionError,
-    InvokeError,
-    InvokeRateLimitError,
-    InvokeServerUnavailableError,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.rerank_model import RerankModel
-
-logger = logging.getLogger(__name__)
-
-
-class SageMakerRerankModel(RerankModel):
-    """
-    Model class for SageMaker rerank model.
-    """
-
-    sagemaker_client: Any = None
-
-    def _sagemaker_rerank(self, query_input: str, docs: list[str], rerank_endpoint: str):
-        inputs = [query_input] * len(docs)
-        response_model = self.sagemaker_client.invoke_endpoint(
-            EndpointName=rerank_endpoint,
-            Body=json.dumps({"inputs": inputs, "docs": docs}),
-            ContentType="application/json",
-        )
-        json_str = response_model["Body"].read().decode("utf8")
-        json_obj = json.loads(json_str)
-        scores = json_obj["scores"]
-        return scores if isinstance(scores, list) else [scores]
-
-    def _invoke(
-        self,
-        model: str,
-        credentials: dict,
-        query: str,
-        docs: list[str],
-        score_threshold: Optional[float] = None,
-        top_n: Optional[int] = None,
-        user: Optional[str] = None,
-    ) -> RerankResult:
-        """
-        Invoke rerank model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param query: search query
-        :param docs: docs for reranking
-        :param score_threshold: score threshold
-        :param top_n: top n
-        :param user: unique user id
-        :return: rerank result
-        """
-        line = 0
-        try:
-            if len(docs) == 0:
-                return RerankResult(model=model, docs=docs)
-
-            line = 1
-            if not self.sagemaker_client:
-                access_key = credentials.get("aws_access_key_id")
-                secret_key = credentials.get("aws_secret_access_key")
-                aws_region = credentials.get("aws_region")
-                if aws_region:
-                    if access_key and secret_key:
-                        self.sagemaker_client = boto3.client(
-                            "sagemaker-runtime",
-                            aws_access_key_id=access_key,
-                            aws_secret_access_key=secret_key,
-                            region_name=aws_region,
-                        )
-                    else:
-                        self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region)
-                else:
-                    self.sagemaker_client = boto3.client("sagemaker-runtime")
-
-            line = 2
-
-            sagemaker_endpoint = credentials.get("sagemaker_endpoint")
-            candidate_docs = []
-
-            scores = self._sagemaker_rerank(query, docs, sagemaker_endpoint)
-            for idx in range(len(scores)):
-                candidate_docs.append({"content": docs[idx], "score": scores[idx]})
-
-            candidate_docs = sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
-
-            line = 3
-            rerank_documents = []
-            for idx, result in enumerate(candidate_docs):
-                rerank_document = RerankDocument(
-                    index=idx, text=result.get("content"), score=result.get("score", -100.0)
-                )
-
-                if score_threshold is not None:
-                    if rerank_document.score >= score_threshold:
-                        rerank_documents.append(rerank_document)
-                else:
-                    rerank_documents.append(rerank_document)
-
-            return RerankResult(model=model, docs=rerank_documents)
-
-        except Exception as e:
-            logger.exception(f"Exception {e}, line: {line}")
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        """
-        Validate model credentials
-
-        :param model: model name
-        :param credentials: model credentials
-        :return:
-        """
-        try:
-            self._invoke(
-                model=model,
-                credentials=credentials,
-                query="What is the capital of the United States?",
-                docs=[
-                    "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
-                    "Census, Carson City had a population of 55,274.",
-                    "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
-                    "are a political division controlled by the United States. Its capital is Saipan.",
-                ],
-                score_threshold=0.8,
-            )
-        except Exception as ex:
-            raise CredentialsValidateFailedError(str(ex))
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [InvokeConnectionError],
-            InvokeServerUnavailableError: [InvokeServerUnavailableError],
-            InvokeRateLimitError: [InvokeRateLimitError],
-            InvokeAuthorizationError: [InvokeAuthorizationError],
-            InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError],
-        }
-
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
-        """
-        used to define customizable model schema
-        """
-        entity = AIModelEntity(
-            model=model,
-            label=I18nObject(en_US=model),
-            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_type=ModelType.RERANK,
-            model_properties={},
-            parameter_rules=[],
-        )
-
-        return entity
diff --git a/api/core/model_runtime/model_providers/sagemaker/sagemaker.py b/api/core/model_runtime/model_providers/sagemaker/sagemaker.py
deleted file mode 100644
index 042155b152..0000000000
--- a/api/core/model_runtime/model_providers/sagemaker/sagemaker.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import logging
-import uuid
-from typing import IO, Any
-
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class SageMakerProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        """
-        Validate provider credentials
-
-        if validate failed, raise exception
-
-        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
-        """
-        pass
-
-
-def buffer_to_s3(s3_client: Any, file: IO[bytes], bucket: str, s3_prefix: str) -> str:
-    """
-    Upload the file to S3 and return its object key.
-    """
-    s3_key = f"{s3_prefix}{uuid.uuid4()}.mp3"
-    s3_client.put_object(Body=file.read(), Bucket=bucket, Key=s3_key, ContentType="audio/mp3")
-    return s3_key
-
-
-def generate_presigned_url(s3_client: Any, file: IO[bytes], bucket_name: str, s3_prefix: str, expiration=600) -> str:
-    object_key = buffer_to_s3(s3_client, file, bucket_name, s3_prefix)
-    try:
-        response = s3_client.generate_presigned_url(
-            "get_object", Params={"Bucket": bucket_name, "Key": object_key}, ExpiresIn=expiration
-        )
-    except Exception as e:
-        logger.error(f"Error generating presigned URL: {e}")
-        return None
-
-    return response
diff --git a/api/core/model_runtime/model_providers/sagemaker/sagemaker.yaml b/api/core/model_runtime/model_providers/sagemaker/sagemaker.yaml
deleted file mode 100644
index 87cd50f50c..0000000000
--- a/api/core/model_runtime/model_providers/sagemaker/sagemaker.yaml
+++ /dev/null
@@ -1,193 +0,0 @@
-provider: sagemaker
-label:
-  zh_Hans: Sagemaker
-  en_US: Sagemaker
-icon_small:
-  en_US: icon_s_en.png
-icon_large:
-  en_US: icon_l_en.png
-description:
-  en_US: Customized model on Sagemaker
-  zh_Hans: Sagemaker上的私有化部署的模型
-background: "#ECE9E3"
-help:
-  title:
-    en_US: How to deploy a customized model on Sagemaker
-    zh_Hans: 如何在Sagemaker上部署私有化模型
-  url:
-    en_US: https://github.com/aws-samples/dify-aws-tool/blob/main/README.md#how-to-deploy-sagemaker-endpoint
-    zh_Hans: https://github.com/aws-samples/dify-aws-tool/blob/main/README_ZH.md#%E5%A6%82%E4%BD%95%E9%83%A8%E7%BD%B2sagemaker%E6%8E%A8%E7%90%86%E7%AB%AF%E7%82%B9
-supported_model_types:
-  - llm
-  - text-embedding
-  - rerank
-  - speech2text
-  - tts
-configurate_methods:
-  - customizable-model
-model_credential_schema:
-  model:
-    label:
-      en_US: Model Name
-      zh_Hans: 模型名称
-    placeholder:
-      en_US: Enter your model name
-      zh_Hans: 输入模型名称
-  credential_form_schemas:
-    - variable: mode
-      show_on:
-        - variable: __model_type
-          value: llm
-      label:
-        en_US: Completion mode
-      type: select
-      required: false
-      default: chat
-      placeholder:
-        zh_Hans: 选择对话类型
-        en_US: Select completion mode
-      options:
-        - value: chat
-          label:
-            en_US: Chat
-            zh_Hans: Chat
-    - variable: sagemaker_endpoint
-      label:
-        en_US: Sagemaker endpoint
-      type: text-input
-      required: true
-      placeholder:
-        zh_Hans: 请输入你的Sagemaker推理端点
-        en_US: Enter your Sagemaker Inference endpoint
-    - variable: audio_s3_cache_bucket
-      show_on:
-        - variable: __model_type
-          value: speech2text
-      label:
-        zh_Hans: 音频缓存桶(s3 bucket)
-        en_US: audio cache bucket(s3 bucket)
-      type: text-input
-      required: true
-      placeholder:
-        zh_Hans: sagemaker-us-east-1-******207838
-        en_US: sagemaker-us-east-1-*******7838
-    - variable: audio_model_type
-      show_on:
-        - variable: __model_type
-          value: tts
-      label:
-        en_US: Audio model type
-      type: select
-      required: true
-      placeholder:
-        zh_Hans: 语音模型类型
-        en_US: Audio model type
-      options:
-        - value: PresetVoice
-          label:
-            en_US: preset voice
-            zh_Hans: 内置音色
-        - value: CloneVoice
-          label:
-            en_US: clone voice
-            zh_Hans: 克隆音色
-        - value: CloneVoice_CrossLingual
-          label:
-            en_US: crosslingual clone voice
-            zh_Hans: 跨语种克隆音色
-        - value: InstructVoice
-          label:
-            en_US: Instruct voice
-            zh_Hans: 文字指令音色
-    - variable: prompt_audio
-      show_on:
-        - variable: __model_type
-          value: tts
-      label:
-        en_US: Mock Audio Source
-      type: text-input
-      required: false
-      placeholder:
-        zh_Hans: 被模仿的音色音频
-        en_US: source audio to be mocked
-    - variable: prompt_text
-      show_on:
-        -
variable: __model_type - value: tts - label: - en_US: Prompt Audio Text - type: text-input - required: false - placeholder: - zh_Hans: 模仿音色的对应文本 - en_US: text for the mocked source audio - - variable: instruct_text - show_on: - - variable: __model_type - value: tts - label: - en_US: instruct text for speaker - type: text-input - required: false - - variable: aws_access_key_id - required: false - label: - en_US: Access Key (If not provided, credentials are obtained from the running environment.) - zh_Hans: Access Key (如果未提供,凭证将从运行环境中获取。) - type: secret-input - placeholder: - en_US: Enter your Access Key - zh_Hans: 在此输入您的 Access Key - - variable: aws_secret_access_key - required: false - label: - en_US: Secret Access Key - zh_Hans: Secret Access Key - type: secret-input - placeholder: - en_US: Enter your Secret Access Key - zh_Hans: 在此输入您的 Secret Access Key - - variable: aws_region - required: false - label: - en_US: AWS Region - zh_Hans: AWS 地区 - type: select - default: us-east-1 - options: - - value: us-east-1 - label: - en_US: US East (N. Virginia) - zh_Hans: 美国东部 (弗吉尼亚北部) - - value: us-west-2 - label: - en_US: US West (Oregon) - zh_Hans: 美国西部 (俄勒冈州) - - value: ap-southeast-1 - label: - en_US: Asia Pacific (Singapore) - zh_Hans: 亚太地区 (新加坡) - - value: ap-northeast-1 - label: - en_US: Asia Pacific (Tokyo) - zh_Hans: 亚太地区 (东京) - - value: eu-central-1 - label: - en_US: Europe (Frankfurt) - zh_Hans: 欧洲 (法兰克福) - - value: us-gov-west-1 - label: - en_US: AWS GovCloud (US-West) - zh_Hans: AWS GovCloud (US-West) - - value: ap-southeast-2 - label: - en_US: Asia Pacific (Sydney) - zh_Hans: 亚太地区 (悉尼) - - value: cn-north-1 - label: - en_US: AWS Beijing (cn-north-1) - zh_Hans: 中国北京 (cn-north-1) - - value: cn-northwest-1 - label: - en_US: AWS Ningxia (cn-northwest-1) - zh_Hans: 中国宁夏 (cn-northwest-1) diff --git a/api/core/model_runtime/model_providers/sagemaker/speech2text/__init__.py b/api/core/model_runtime/model_providers/sagemaker/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/sagemaker/speech2text/speech2text.py b/api/core/model_runtime/model_providers/sagemaker/speech2text/speech2text.py deleted file mode 100644 index 6aa8c9995f..0000000000 --- a/api/core/model_runtime/model_providers/sagemaker/speech2text/speech2text.py +++ /dev/null @@ -1,125 +0,0 @@ -import json -import logging -from typing import IO, Any, Optional - -import boto3 - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel -from core.model_runtime.model_providers.sagemaker.sagemaker import generate_presigned_url - -logger = logging.getLogger(__name__) - -class SageMakerSpeech2TextModel(Speech2TextModel): - """ - Model class for SageMaker speech-to-text model.
- """ - - sagemaker_client: Any = None - s3_client: Any = None - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - asr_text = None - - try: - if not self.sagemaker_client: - access_key = credentials.get("aws_access_key_id") - secret_key = credentials.get("aws_secret_access_key") - aws_region = credentials.get("aws_region") - if aws_region: - if access_key and secret_key: - self.sagemaker_client = boto3.client( - "sagemaker-runtime", - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - region_name=aws_region, - ) - self.s3_client = boto3.client( - "s3", aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=aws_region - ) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region) - self.s3_client = boto3.client("s3", region_name=aws_region) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime") - self.s3_client = boto3.client("s3") - - s3_prefix = "dify/speech2text/" - sagemaker_endpoint = credentials.get("sagemaker_endpoint") - bucket = credentials.get("audio_s3_cache_bucket") - - s3_presign_url = generate_presigned_url(self.s3_client, file, bucket, s3_prefix) - payload = {"audio_s3_presign_uri": s3_presign_url} - - response_model = self.sagemaker_client.invoke_endpoint( - EndpointName=sagemaker_endpoint, Body=json.dumps(payload), ContentType="application/json" - ) - json_str = response_model["Body"].read().decode("utf8") - json_obj = json.loads(json_str) - asr_text = json_obj["text"] - except Exception as e: - logger.exception(f"Exception {e}, line : {line}") - - return asr_text - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - pass - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.SPEECH2TEXT, - model_properties={}, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/sagemaker/text_embedding/__init__.py b/api/core/model_runtime/model_providers/sagemaker/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/sagemaker/tts/__init__.py b/api/core/model_runtime/model_providers/sagemaker/tts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/sagemaker/tts/tts.py b/api/core/model_runtime/model_providers/sagemaker/tts/tts.py deleted file mode 100644 index a22bd6dd6e..0000000000 --- a/api/core/model_runtime/model_providers/sagemaker/tts/tts.py +++ /dev/null @@ -1,275 +0,0 @@ -import concurrent.futures -import copy -import json -import logging -from enum import Enum -from typing import Any, Optional - -import boto3 -import requests - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.model_providers.__base.tts_model import TTSModel - -logger = logging.getLogger(__name__) - - -class TTSModelType(Enum): - PresetVoice = "PresetVoice" - CloneVoice = "CloneVoice" - CloneVoice_CrossLingual = "CloneVoice_CrossLingual" - InstructVoice = "InstructVoice" - - -class SageMakerText2SpeechModel(TTSModel): - sagemaker_client: Any = None - s3_client: Any = None - comprehend_client: Any = None - - def __init__(self): - # preset voices, need support custom voice - self.model_voices = { - "__default": { - "all": [ - {"name": "Default", "value": "default"}, - ] - }, - "CosyVoice": { - "zh-Hans": [ - {"name": "中文男", "value": "中文男"}, - {"name": "中文女", "value": "中文女"}, - {"name": "粤语女", "value": "粤语女"}, - ], - "zh-Hant": [ - {"name": "中文男", "value": "中文男"}, - {"name": "中文女", "value": "中文女"}, - {"name": "粤语女", "value": "粤语女"}, - ], - "en-US": [ - {"name": "英文男", "value": "英文男"}, - {"name": "英文女", "value": "英文女"}, - ], - "ja-JP": [ - {"name": "日语男", "value": "日语男"}, - ], - "ko-KR": [ - {"name": "韩语女", "value": "韩语女"}, - ], - }, - } - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - pass - - def _detect_lang_code(self, content: str, map_dict: dict = None): - map_dict = {"zh": "<|zh|>", "en": "<|en|>", "ja": "<|jp|>", "zh-TW": "<|yue|>", "ko": "<|ko|>"} - - response = self.comprehend_client.detect_dominant_language(Text=content) - language_code = response["Languages"][0]["LanguageCode"] - - return 
map_dict.get(language_code, "<|zh|>") - - def _build_tts_payload( - self, - model_type: str, - content_text: str, - model_role: str, - prompt_text: str, - prompt_audio: str, - instruct_text: str, - ): - if model_type == TTSModelType.PresetVoice.value and model_role: - return {"tts_text": content_text, "role": model_role} - if model_type == TTSModelType.CloneVoice.value and prompt_text and prompt_audio: - return {"tts_text": content_text, "prompt_text": prompt_text, "prompt_audio": prompt_audio} - if model_type == TTSModelType.CloneVoice_CrossLingual.value and prompt_audio: - lang_tag = self._detect_lang_code(content_text) - return {"tts_text": f"{content_text}", "prompt_audio": prompt_audio, "lang_tag": lang_tag} - if model_type == TTSModelType.InstructVoice.value and instruct_text and model_role: - return {"tts_text": content_text, "role": model_role, "instruct_text": instruct_text} - - raise RuntimeError(f"Invalid params for {model_type}") - - def _invoke( - self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None - ): - """ - _invoke text2speech model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param voice: model timbre - :param content_text: text content to be translated - :param user: unique user id - :return: text translated to audio file - """ - if not self.sagemaker_client: - access_key = credentials.get("aws_access_key_id") - secret_key = credentials.get("aws_secret_access_key") - aws_region = credentials.get("aws_region") - if aws_region: - if access_key and secret_key: - self.sagemaker_client = boto3.client( - "sagemaker-runtime", - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - region_name=aws_region, - ) - self.s3_client = boto3.client( - "s3", aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=aws_region - ) - self.comprehend_client = boto3.client( - "comprehend", - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - region_name=aws_region, - ) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region) - self.s3_client = boto3.client("s3", region_name=aws_region) - self.comprehend_client = boto3.client("comprehend", region_name=aws_region) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime") - self.s3_client = boto3.client("s3") - self.comprehend_client = boto3.client("comprehend") - - model_type = credentials.get("audio_model_type", "PresetVoice") - prompt_text = credentials.get("prompt_text") - prompt_audio = credentials.get("prompt_audio") - instruct_text = credentials.get("instruct_text") - sagemaker_endpoint = credentials.get("sagemaker_endpoint") - payload = self._build_tts_payload(model_type, content_text, voice, prompt_text, prompt_audio, instruct_text) - - return self._tts_invoke_streaming(model_type, payload, sagemaker_endpoint) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TTS, - model_properties={}, - parameter_rules=[], - ) - - return entity - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which 
needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def _get_model_default_voice(self, model: str, credentials: dict) -> any: - return "" - - def _get_model_word_limit(self, model: str, credentials: dict) -> int: - return 15 - - def _get_model_audio_type(self, model: str, credentials: dict) -> str: - return "mp3" - - def _get_model_workers_limit(self, model: str, credentials: dict) -> int: - return 5 - - def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list: - audio_model_name = "CosyVoice" - for key, voices in self.model_voices.items(): - if key in audio_model_name: - if language and language in voices: - return voices[language] - elif "all" in voices: - return voices["all"] - - return self.model_voices["__default"]["all"] - - def _invoke_sagemaker(self, payload: dict, endpoint: str): - response_model = self.sagemaker_client.invoke_endpoint( - EndpointName=endpoint, - Body=json.dumps(payload), - ContentType="application/json", - ) - json_str = response_model["Body"].read().decode("utf8") - json_obj = json.loads(json_str) - return json_obj - - def _tts_invoke_streaming(self, model_type: str, payload: dict, sagemaker_endpoint: str) -> any: - """ - _tts_invoke_streaming text2speech model - - :param model: model name - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :return: text translated to audio file - """ - try: - lang_tag = "" - if model_type == TTSModelType.CloneVoice_CrossLingual.value: - lang_tag = payload.pop("lang_tag") - - word_limit = self._get_model_word_limit(model="", credentials={}) - content_text = payload.get("tts_text") - if len(content_text) > word_limit: - split_sentences = self._split_text_into_sentences(content_text, max_length=word_limit) - sentences = [f"{lang_tag}{s}" for s in split_sentences if len(s)] - len_sent = len(sentences) - executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(4, len_sent)) - payloads = [copy.deepcopy(payload) for i in range(len_sent)] - for idx in range(len_sent): - payloads[idx]["tts_text"] = sentences[idx] - - futures = [ - executor.submit( - self._invoke_sagemaker, - payload=payload, - endpoint=sagemaker_endpoint, - ) - for payload in payloads - ] - - for future in futures: - resp = future.result() - audio_bytes = requests.get(resp.get("s3_presign_url")).content - for i in range(0, len(audio_bytes), 1024): - yield audio_bytes[i : i + 1024] - else: - resp = self._invoke_sagemaker(payload, sagemaker_endpoint) - audio_bytes = requests.get(resp.get("s3_presign_url")).content - - for i in range(0, len(audio_bytes), 1024): - yield audio_bytes[i : i + 1024] - except Exception as ex: - raise InvokeBadRequestError(str(ex)) diff --git a/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow.svg b/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow.svg deleted file mode 100644 index 16e406f030..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git 
a/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow_square.svg b/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow_square.svg deleted file mode 100644 index ad6b384f7a..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/_assets/siliconflow_square.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepdeek-coder-v2-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepdeek-coder-v2-instruct.yaml deleted file mode 100644 index d4431179e5..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/deepdeek-coder-v2-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: deepseek-ai/DeepSeek-Coder-V2-Instruct -label: - en_US: deepseek-ai/DeepSeek-Coder-V2-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.33' - output: '1.33' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml deleted file mode 100644 index caa6508b5e..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: deepseek-ai/DeepSeek-V2-Chat -label: - en_US: deepseek-ai/DeepSeek-V2-Chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.33' - output: '1.33' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2.5.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2.5.yaml deleted file mode 100644 index 1c8e15ae52..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/deepseek-v2.5.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: deepseek-ai/DeepSeek-V2.5 -label: - en_US: deepseek-ai/DeepSeek-V2.5 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
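
The pricing blocks in these SiliconFlow YAML files quote a price per unit of tokens: with unit '0.000001', input '1.33' and currency RMB read as 1.33 RMB per million input tokens. A small sanity-check sketch of that reading (the interpretation of the fields is an assumption based on their values, not something stated in the diff):

from decimal import Decimal

def estimate_cost(input_tokens: int, output_tokens: int) -> Decimal:
    # Figures from the deepseek-ai/DeepSeek-V2-Chat pricing block above.
    unit = Decimal("0.000001")
    input_price = Decimal("1.33")
    output_price = Decimal("1.33")
    return (input_tokens * input_price + output_tokens * output_price) * unit

print(estimate_cost(1_000_000, 1_000_000))  # 2.66 RMB for a million tokens each way
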
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.33' - output: '1.33' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml deleted file mode 100644 index 2840e3dcf4..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-27b-it.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: google/gemma-2-27b-it -label: - en_US: google/gemma-2-27b-it -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8196 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.26' - output: '1.26' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml deleted file mode 100644 index d7e19b46f6..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/gemma-2-9b-it.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: google/gemma-2-9b-it -label: - en_US: google/gemma-2-9b-it -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8196 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml deleted file mode 100644 index 9b32a02477..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/glm4-9b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: THUDM/glm-4-9b-chat -label: - en_US: THUDM/glm-4-9b-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
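
Every LLM YAML in this provider repeats the same parameter_rules shape: templated temperature, top_p, and frequency_penalty entries plus an explicit max_tokens rule carrying type, default, min, and max. A minimal sketch of resolving a user-supplied value against such a rule (illustrative only; the actual validation lives in Dify's parameter handling, not in this diff):

def resolve_int_param(value: int | None, default: int, minimum: int, maximum: int) -> int:
    # Fall back to the rule's default, then clamp into [minimum, maximum].
    if value is None:
        return default
    return max(minimum, min(maximum, value))

# max_tokens rule from google/gemma-2-9b-it above: default 512, min 1, max 4096.
assert resolve_int_param(None, 512, 1, 4096) == 512
assert resolve_int_param(10_000, 512, 1, 4096) == 4096
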
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml deleted file mode 100644 index 73ad4480aa..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-7b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: internlm/internlm2_5-7b-chat -label: - en_US: internlm/internlm2_5-7b-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/llm.py b/api/core/model_runtime/model_providers/siliconflow/llm/llm.py deleted file mode 100644 index c1868b6ad0..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/llm.py +++ /dev/null @@ -1,31 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class SiliconflowLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - @classmethod - def _add_custom_parameters(cls, credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["endpoint_url"] = "https://api.siliconflow.cn/v1" diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml deleted file mode 100644 index 9993d781ac..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-70b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: meta-llama/Meta-Llama-3-70B-Instruct -label: - en_US: meta-llama/Meta-Llama-3-70B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of 
generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '4.13' - output: '4.13' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml deleted file mode 100644 index 60e3764789..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3-8b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: meta-llama/Meta-Llama-3-8B-Instruct -label: - en_US: meta-llama/Meta-Llama-3-8B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml deleted file mode 100644 index f992660aa2..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-405b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: meta-llama/Meta-Llama-3.1-405B-Instruct -label: - en_US: meta-llama/Meta-Llama-3.1-405B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '21' - output: '21' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml deleted file mode 100644 index 1c69d63a40..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-70b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: meta-llama/Meta-Llama-3.1-70B-Instruct -label: - en_US: meta-llama/Meta-Llama-3.1-70B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
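
The deleted llm.py above is a thin subclass: _add_custom_parameters pins mode to chat and endpoint_url to https://api.siliconflow.cn/v1, and invocation is otherwise inherited from the OpenAI-API-compatible base model. Any model id listed in these YAML files should therefore answer a stock chat-completions request; a minimal sketch with requests, assuming the standard /chat/completions path (the API key and model id are placeholders):

import requests

def siliconflow_chat(api_key: str, prompt: str) -> str:
    response = requests.post(
        "https://api.siliconflow.cn/v1/chat/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={
            "model": "Qwen/Qwen2-7B-Instruct",  # any model id from the YAML files above
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 512,
        },
        timeout=60,
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]
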
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '4.13' - output: '4.13' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml deleted file mode 100644 index a97002a5ca..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/meta-mlama-3.1-8b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: meta-llama/Meta-Llama-3.1-8B-Instruct -label: - en_US: meta-llama/Meta-Llama-3.1-8B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml deleted file mode 100644 index 89fb153ba0..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: mistralai/Mistral-7B-Instruct-v0.2 -label: - en_US: mistralai/Mistral-7B-Instruct-v0.2 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml deleted file mode 100644 index 2785e7496f..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: mistralai/Mixtral-8x7B-Instruct-v0.1 -label: - en_US: mistralai/Mixtral-8x7B-Instruct-v0.1 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.26' - output: '1.26' - unit: '0.000001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml deleted file mode 100644 index f6c976af8e..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-1.5b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2-1.5B-Instruct -label: - en_US: Qwen/Qwen2-1.5B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml deleted file mode 100644 index a996e919ea..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-57b-a14b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2-57B-A14B-Instruct -label: - en_US: Qwen/Qwen2-57B-A14B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.26' - output: '1.26' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml deleted file mode 100644 index a6e2c22dac..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-72b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2-72B-Instruct -label: - en_US: Qwen/Qwen2-72B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
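
Both Mistral YAMLs above end with deprecated: true, a flag the Qwen2 files do not carry. A plausible consumer-side filter over such configs, sketched here as an assumption (the diff does not show where the flag is read):

import yaml  # PyYAML

def active_model_ids(yaml_texts: list[str]) -> list[str]:
    # Keep only configs that are not explicitly marked deprecated.
    ids = []
    for text in yaml_texts:
        config = yaml.safe_load(text)
        if not config.get("deprecated", False):
            ids.append(config["model"])
    return ids
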
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '4.13' - output: '4.13' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml deleted file mode 100644 index d8bea5e129..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2-7b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2-7B-Instruct -label: - en_US: Qwen/Qwen2-7B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-14b-instruct.yaml deleted file mode 100644 index 02a401464b..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-14b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2.5-14B-Instruct -label: - en_US: Qwen/Qwen2.5-14B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 8192 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0.7' - output: '0.7' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-32b-instruct.yaml deleted file mode 100644 index d084617e7d..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-32b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2.5-32B-Instruct -label: - en_US: Qwen/Qwen2.5-32B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 8192 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
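
Note the two distinct limits in each file: model_properties.context_size is the whole window shared by prompt and completion (32768 tokens for Qwen/Qwen2-72B-Instruct), while the max_tokens rule caps only the completion (4096 at most here). The prompt budget is the difference; a one-line helper to make that explicit, using the figures above:

def prompt_budget(context_size: int, max_tokens: int) -> int:
    # Tokens left for the prompt once the completion reservation is subtracted.
    return context_size - max_tokens

assert prompt_budget(32768, 4096) == 28672  # Qwen/Qwen2-72B-Instruct above
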
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.26' - output: '1.26' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-72b-instruct.yaml deleted file mode 100644 index dfbad2494c..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-72b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2.5-72B-Instruct -label: - en_US: Qwen/Qwen2.5-72B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 8192 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '4.13' - output: '4.13' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-7b-instruct.yaml deleted file mode 100644 index cdc8ffc4d2..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-7b-instruct.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: Qwen/Qwen2.5-7B-Instruct -label: - en_US: Qwen/Qwen2.5-7B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 8192 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-34b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-34b-chat.yaml deleted file mode 100644 index 864ba46f1a..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-34b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: 01-ai/Yi-1.5-34B-Chat -label: - en_US: 01-ai/Yi-1.5-34B-Chat-16K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1.26' - output: '1.26' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml deleted file mode 100644 index fe4c8b4b3e..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-6b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: 01-ai/Yi-1.5-6B-Chat -label: - en_US: 01-ai/Yi-1.5-6B-Chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml deleted file mode 100644 index c61f0dc53f..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/yi-1.5-9b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: 01-ai/Yi-1.5-9B-Chat-16K -label: - en_US: 01-ai/Yi-1.5-9B-Chat-16K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/rerank/__init__.py b/api/core/model_runtime/model_providers/siliconflow/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/siliconflow/rerank/bce-reranker-base_v1.yaml b/api/core/model_runtime/model_providers/siliconflow/rerank/bce-reranker-base_v1.yaml deleted file mode 100644 index ff3635bfeb..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/rerank/bce-reranker-base_v1.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: netease-youdao/bce-reranker-base_v1 -model_type: rerank -model_properties: - context_size: 512 diff --git a/api/core/model_runtime/model_providers/siliconflow/rerank/bge-reranker-v2-m3.yaml b/api/core/model_runtime/model_providers/siliconflow/rerank/bge-reranker-v2-m3.yaml deleted file mode 100644 index 807f531b08..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/rerank/bge-reranker-v2-m3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: BAAI/bge-reranker-v2-m3 -model_type: rerank -model_properties: - context_size: 8192 diff --git a/api/core/model_runtime/model_providers/siliconflow/rerank/rerank.py b/api/core/model_runtime/model_providers/siliconflow/rerank/rerank.py deleted file mode 100644 index 58b033d28a..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/rerank/rerank.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import Optional - -import httpx - -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class SiliconflowRerankModel(RerankModel): - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - base_url = credentials.get("base_url", "https://api.siliconflow.cn/v1") - base_url = base_url.removesuffix("/") - try: - response = httpx.post( - base_url + "/rerank", - json={"model": model, "query": query, "documents": docs, "top_n": top_n, "return_documents": True}, - headers={"Authorization": f"Bearer {credentials.get('api_key')}"}, - ) - response.raise_for_status() - results = response.json() - - rerank_documents = [] - for result in results["results"]: - rerank_document = RerankDocument( - index=result["index"], - text=result["document"]["text"], - score=result["relevance_score"], - ) - if score_threshold is None or result["relevance_score"] >= score_threshold: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - except httpx.HTTPStatusError as e: - raise InvokeServerUnavailableError(str(e)) - - def validate_credentials(self, model: str, credentials: dict) -> None: - try: - self._invoke( - model=model, - credentials=credentials, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. 
At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. Its capital is Saipan.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - """ - return { - InvokeConnectionError: [httpx.ConnectError], - InvokeServerUnavailableError: [httpx.RemoteProtocolError], - InvokeRateLimitError: [], - InvokeAuthorizationError: [httpx.HTTPStatusError], - InvokeBadRequestError: [httpx.RequestError], - } diff --git a/api/core/model_runtime/model_providers/siliconflow/siliconflow.py b/api/core/model_runtime/model_providers/siliconflow/siliconflow.py deleted file mode 100644 index e121ab8c7e..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/siliconflow.py +++ /dev/null @@ -1,26 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class SiliconflowProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="deepseek-ai/DeepSeek-V2-Chat", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml b/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml deleted file mode 100644 index c46a891604..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/siliconflow.yaml +++ /dev/null @@ -1,32 +0,0 @@ -provider: siliconflow -label: - zh_Hans: 硅基流动 - en_US: SiliconFlow -icon_small: - en_US: siliconflow_square.svg -icon_large: - en_US: siliconflow.svg -background: "#ffecff" -help: - title: - en_US: Get your API Key from SiliconFlow - zh_Hans: 从 SiliconFlow 获取 API Key - url: - en_US: https://cloud.siliconflow.cn/account/ak -supported_model_types: - - llm - - text-embedding - - rerank - - speech2text -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/__init__.py b/api/core/model_runtime/model_providers/siliconflow/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml b/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml deleted file mode 100644 index deceaf60f4..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/speech2text/sense-voice-small.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -model: iic/SenseVoiceSmall -model_type: speech2text -model_properties: - file_upload_limit: 1 - supported_file_extensions: mp3,wav diff --git a/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py b/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py deleted file mode 100644 index 8d1932863e..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/speech2text/speech2text.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import IO, Optional - -from core.model_runtime.model_providers.openai_api_compatible.speech2text.speech2text import OAICompatSpeech2TextModel - - -class SiliconflowSpeech2TextModel(OAICompatSpeech2TextModel): - """ - Model class for Siliconflow Speech to text model. - """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - self._add_custom_parameters(credentials) - return super()._invoke(model, credentials, file) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - return super().validate_credentials(model, credentials) - - @classmethod - def _add_custom_parameters(cls, credentials: dict) -> None: - credentials["endpoint_url"] = "https://api.siliconflow.cn/v1" diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml deleted file mode 100644 index 710fbc04f6..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bce-embedding-base-v1.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: netease-youdao/bce-embedding-base_v1 -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml deleted file mode 100644 index 84f69b41a0..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-en-v1.5.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: BAAI/bge-large-en-v1.5 -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml deleted file mode 100644 index 5248375d0b..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-large-zh-v1.5.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: BAAI/bge-large-zh-v1.5 -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml b/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml deleted file mode 100644 index f0b12dd420..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/bge-m3.yaml +++ /dev/null @@ -1,5 +0,0 @@ -model: BAAI/bge-m3 -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 1 diff --git a/api/core/model_runtime/model_providers/spark/__init__.py 
b/api/core/model_runtime/model_providers/spark/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/spark/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/spark/_assets/icon_l_en.svg deleted file mode 100644 index 521c68cae5..0000000000 --- a/api/core/model_runtime/model_providers/spark/_assets/icon_l_en.svg +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/spark/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/spark/_assets/icon_l_zh.svg deleted file mode 100644 index 71d85216aa..0000000000 --- a/api/core/model_runtime/model_providers/spark/_assets/icon_l_zh.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/spark/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/spark/_assets/icon_s_en.svg deleted file mode 100644 index ef0a9131a4..0000000000 --- a/api/core/model_runtime/model_providers/spark/_assets/icon_s_en.svg +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/api/core/model_runtime/model_providers/spark/llm/__init__.py b/api/core/model_runtime/model_providers/spark/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py deleted file mode 100644 index 48911f657a..0000000000 --- a/api/core/model_runtime/model_providers/spark/llm/_client.py +++ /dev/null @@ -1,163 +0,0 @@ -import base64 -import hashlib -import hmac -import json -import queue -import ssl -from datetime import datetime -from time import mktime -from typing import Optional -from urllib.parse import urlencode, urlparse -from wsgiref.handlers import format_date_time - -import websocket - - -class SparkLLMClient: - def __init__(self, model: str, app_id: str, api_key: str, api_secret: str, api_domain: Optional[str] = None): - domain = "spark-api.xf-yun.com" - endpoint = "chat" - if api_domain: - domain = api_domain - - model_api_configs = { - "spark-lite": {"version": "v1.1", "chat_domain": "general"}, - "spark-pro": {"version": "v3.1", "chat_domain": "generalv3"}, - "spark-pro-128k": {"version": "pro-128k", "chat_domain": "pro-128k"}, - "spark-max": {"version": "v3.5", "chat_domain": "generalv3.5"}, - "spark-max-32k": {"version": "max-32k", "chat_domain": "max-32k"}, - "spark-4.0-ultra": {"version": "v4.0", "chat_domain": "4.0Ultra"}, - } - - api_version = model_api_configs[model]["version"] - - self.chat_domain = model_api_configs[model]["chat_domain"] - - if model in ["spark-pro-128k", "spark-max-32k"]: - self.api_base = f"wss://{domain}/{endpoint}/{api_version}" - else: - self.api_base = f"wss://{domain}/{api_version}/{endpoint}" - - self.app_id = app_id - self.ws_url = self.create_url( - urlparse(self.api_base).netloc, urlparse(self.api_base).path, self.api_base, api_key, api_secret - ) - - self.queue = queue.Queue() - self.blocking_message = "" - - def create_url(self, host: str, path: str, api_base: str, api_key: str, api_secret: str) -> str: - # generate timestamp by RFC1123 - now = datetime.now() - date = format_date_time(mktime(now.timetuple())) - - signature_origin = "host: " + host + "\n" - signature_origin += "date: " + date + "\n" - signature_origin += "GET " + path + " HTTP/1.1" - - # encrypt using hmac-sha256 - signature_sha = hmac.new( - api_secret.encode("utf-8"), signature_origin.encode("utf-8"), 
digestmod=hashlib.sha256 - ).digest() - - signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8") - - authorization_origin = ( - f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",' - f' signature="{signature_sha_base64}"' - ) - - authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8") - - v = {"authorization": authorization, "date": date, "host": host} - # generate url - url = api_base + "?" + urlencode(v) - return url - - def run(self, messages: list, user_id: str, model_kwargs: Optional[dict] = None, streaming: bool = False): - websocket.enableTrace(False) - ws = websocket.WebSocketApp( - self.ws_url, - on_message=self.on_message, - on_error=self.on_error, - on_close=self.on_close, - on_open=self.on_open, - ) - ws.messages = messages - ws.user_id = user_id - ws.model_kwargs = model_kwargs - ws.streaming = streaming - ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE}) - - def on_error(self, ws, error): - self.queue.put({"status_code": error.status_code, "error": error.resp_body.decode("utf-8")}) - ws.close() - - def on_close(self, ws, close_status_code, close_reason): - self.queue.put({"done": True}) - - def on_open(self, ws): - self.blocking_message = "" - data = json.dumps(self.gen_params(messages=ws.messages, user_id=ws.user_id, model_kwargs=ws.model_kwargs)) - ws.send(data) - - def on_message(self, ws, message): - data = json.loads(message) - code = data["header"]["code"] - if code != 0: - self.queue.put({"status_code": 400, "error": f"Code: {code}, Error: {data['header']['message']}"}) - ws.close() - else: - choices = data["payload"]["choices"] - status = choices["status"] - content = choices["text"][0]["content"] - if ws.streaming: - self.queue.put({"data": content}) - else: - self.blocking_message += content - - if status == 2: - if not ws.streaming: - self.queue.put({"data": self.blocking_message}) - ws.close() - - def gen_params(self, messages: list, user_id: str, model_kwargs: Optional[dict] = None) -> dict: - data = { - "header": { - "app_id": self.app_id, - # resolve this error message => $.header.uid' length must be less or equal than 32 - "uid": user_id[:32] if user_id else None, - }, - "parameter": {"chat": {"domain": self.chat_domain}}, - "payload": {"message": {"text": messages}}, - } - - if model_kwargs: - data["parameter"]["chat"].update(model_kwargs) - - return data - - def subscribe(self): - while True: - content = self.queue.get() - if "error" in content: - if content["status_code"] == 401: - raise SparkError( - "[Spark] The credentials you provided are incorrect. " - "Please double-check and fill them in again." - ) - elif content["status_code"] == 403: - raise SparkError( - "[Spark] Sorry, the credentials you provided are access denied. " - "Please try again after obtaining the necessary permissions." 
-                    )
-                else:
-                    raise SparkError(f"[Spark] code: {content['status_code']}, error: {content['error']}")
-
-            if "data" not in content:
-                break
-            yield content
-
-
-class SparkError(Exception):
-    pass
diff --git a/api/core/model_runtime/model_providers/spark/llm/_position.yaml b/api/core/model_runtime/model_providers/spark/llm/_position.yaml
deleted file mode 100644
index 73f39cb119..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/_position.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-- spark-max-32k
-- spark-4.0-ultra
-- spark-max
-- spark-pro-128k
-- spark-pro
-- spark-lite
-- spark-4
-- spark-3.5
-- spark-3
-- spark-1.5
-- spark-2
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
deleted file mode 100644
index fcd65c24e0..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-1.5.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-model: spark-1.5
-deprecated: true
-label:
-  en_US: Spark V1.5
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 512
-    min: 1
-    max: 4096
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: 模型回答的tokens的最大长度。
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
deleted file mode 100644
index 2db6805a2e..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-2.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-model: spark-2
-deprecated: true
-label:
-  en_US: Spark V2.0
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 2048
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: 模型回答的tokens的最大长度。
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml
deleted file mode 100644
index 86617a53d0..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-3.5.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-model: spark-3.5
-deprecated: true
-label:
-  en_US: Spark V3.5
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 2048
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: 模型回答的tokens的最大长度。
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
deleted file mode 100644
index 9f296c684d..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-3.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-model: spark-3
-deprecated: true
-label:
-  en_US: Spark V3.0
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 2048
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: 模型回答的tokens的最大长度。
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-4.0-ultra.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-4.0-ultra.yaml
deleted file mode 100644
index bbf85764f1..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-4.0-ultra.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-model: spark-4.0-ultra
-label:
-  en_US: Spark 4.0 Ultra
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
-  - name: show_ref_label
-    label:
-      zh_Hans: 联网检索
-      en_US: web search
-    type: boolean
-    default: false
-    help:
-      zh_Hans: 该参数仅4.0 Ultra版本支持,当设置为true时,如果输入内容触发联网检索插件,会先返回检索信源列表,然后再返回星火回复结果,否则仅返回星火回复结果
-      en_US: The parameter is only supported in the 4.0 Ultra version. When set to true, if the input triggers the online search plugin, it will first return a list of search sources and then return the Spark response. Otherwise, it will only return the Spark response.
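The `SparkLLMClient` removed above authenticates its WebSocket handshake with iFLYTEK's HMAC-SHA256 scheme. A minimal, self-contained sketch of that signing logic, condensed from the deleted `create_url` (the standalone helper name and the example endpoint below are illustrative, not part of the diff):

```python
import base64
import hashlib
import hmac
from datetime import datetime
from time import mktime
from urllib.parse import urlencode, urlparse
from wsgiref.handlers import format_date_time


def build_spark_ws_url(api_base: str, api_key: str, api_secret: str) -> str:
    """Build a signed wss:// URL the way the deleted SparkLLMClient.create_url did."""
    parsed = urlparse(api_base)
    host, path = parsed.netloc, parsed.path

    # RFC 1123 timestamp, e.g. "Mon, 23 Sep 2024 08:00:00 GMT"
    date = format_date_time(mktime(datetime.now().timetuple()))

    # Canonical string covering host, date and the request line
    signature_origin = f"host: {host}\ndate: {date}\nGET {path} HTTP/1.1"

    # HMAC-SHA256 over the canonical string, base64-encoded
    signature = base64.b64encode(
        hmac.new(api_secret.encode("utf-8"), signature_origin.encode("utf-8"), hashlib.sha256).digest()
    ).decode("utf-8")

    authorization_origin = (
        f'api_key="{api_key}", algorithm="hmac-sha256", '
        f'headers="host date request-line", signature="{signature}"'
    )
    authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode("utf-8")

    # The gateway reads the credentials from the query string of the wss:// URL
    return api_base + "?" + urlencode({"authorization": authorization, "date": date, "host": host})


# Example (spark-max endpoint, per the deleted model_api_configs):
# url = build_spark_ws_url("wss://spark-api.xf-yun.com/v3.5/chat", "<api_key>", "<api_secret>")
```

Because the signature covers only the host, date and request line, the same helper works for every Spark model version; only the URL path differs.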
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml
deleted file mode 100644
index 4b5529e81c..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-4.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-model: spark-4
-deprecated: true
-label:
-  en_US: Spark V4.0
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: 模型回答的tokens的最大长度。
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-lite.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-lite.yaml
deleted file mode 100644
index 1f6141a816..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-lite.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-model: spark-lite
-label:
-  en_US: Spark Lite
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 4096
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml
deleted file mode 100644
index 1a1ab6844c..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-max-32k.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-model: spark-max-32k
-label:
-  en_US: Spark Max-32K
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-max.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-max.yaml
deleted file mode 100644
index 71eb2b86d3..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-max.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-model: spark-max
-label:
-  en_US: Spark Max
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-pro-128k.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-pro-128k.yaml
deleted file mode 100644
index da1fead6da..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-pro-128k.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-model: spark-pro-128k
-label:
-  en_US: Spark Pro-128K
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 4096
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/llm/spark-pro.yaml b/api/core/model_runtime/model_providers/spark/llm/spark-pro.yaml
deleted file mode 100644
index 9ee479f15b..0000000000
--- a/api/core/model_runtime/model_providers/spark/llm/spark-pro.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-model: spark-pro
-label:
-  en_US: Spark Pro
-model_type: llm
-model_properties:
-  mode: chat
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    default: 0.5
-    help:
-      zh_Hans: 核采样阈值。用于决定结果随机性,取值越高随机性越强即相同的问题得到的不同答案的可能性越高。
-      en_US: Kernel sampling threshold. Used to determine the randomness of the results. The higher the value, the stronger the randomness, that is, the higher the possibility of getting different answers to the same question.
-  - name: max_tokens
-    use_template: max_tokens
-    default: 4096
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 模型回答的tokens的最大长度。
-      en_US: Maximum length of tokens for the model response.
-  - name: top_k
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    type: int
-    default: 4
-    min: 1
-    max: 6
-    help:
-      zh_Hans: 从 k 个候选中随机选择一个(非等概率)。
-      en_US: Randomly select one from k candidates (non-equal probability).
-    required: false
diff --git a/api/core/model_runtime/model_providers/spark/spark.py b/api/core/model_runtime/model_providers/spark/spark.py
deleted file mode 100644
index b3695e0501..0000000000
--- a/api/core/model_runtime/model_providers/spark/spark.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import logging
-
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class SparkProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        """
-        Validate provider credentials
-
-        if validate failed, raise exception
-
-        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
-        """
-        # ignore credentials validation because every model has their own spark quota pool
-        pass
diff --git a/api/core/model_runtime/model_providers/spark/spark.yaml b/api/core/model_runtime/model_providers/spark/spark.yaml
deleted file mode 100644
index 3b07b30f24..0000000000
--- a/api/core/model_runtime/model_providers/spark/spark.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-provider: spark
-label:
-  zh_Hans: 讯飞星火
-  en_US: iFLYTEK SPARK
-icon_small:
-  en_US: icon_s_en.svg
-icon_large:
-  zh_Hans: icon_l_zh.svg
-  en_US: icon_l_en.svg
-background: "#EBF8FF"
-help:
-  title:
-    en_US: Get your API key from iFLYTEK SPARK
-    zh_Hans: 从讯飞星火获取 API Keys
-  url:
-    en_US: https://www.xfyun.cn/solutions/xinghuoAPI
-supported_model_types:
-  - llm
-configurate_methods:
-  - predefined-model
-provider_credential_schema:
-  credential_form_schemas:
-    - variable: app_id
-      label:
-        en_US: APPID
-      type: text-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 APPID
-        en_US: Enter your APPID
-    - variable: api_secret
-      label:
-        en_US: APISecret
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 APISecret
-        en_US: Enter your APISecret
-    - variable: api_key
-      label:
-        en_US: APIKey
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 APIKey
-        en_US: Enter your APIKey
diff --git a/api/core/model_runtime/model_providers/stepfun/__init__.py b/api/core/model_runtime/model_providers/stepfun/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/stepfun/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/stepfun/_assets/icon_l_en.png
deleted file mode 100644
index c118ea09bd..0000000000
Binary files a/api/core/model_runtime/model_providers/stepfun/_assets/icon_l_en.png and /dev/null differ
diff --git a/api/core/model_runtime/model_providers/stepfun/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/stepfun/_assets/icon_s_en.png
deleted file mode 100644
index 85b96d0c74..0000000000
Binary files a/api/core/model_runtime/model_providers/stepfun/_assets/icon_s_en.png and /dev/null differ
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/__init__.py b/api/core/model_runtime/model_providers/stepfun/llm/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml b/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml
deleted file mode 100644
index 2bb0c703f4..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/_position.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-- step-1-8k
-- step-1-32k
-- step-1-128k
-- step-1-256k
-- step-1-flash
-- step-2-16k
-- step-1v-8k
-- step-1v-32k
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/llm.py b/api/core/model_runtime/model_providers/stepfun/llm/llm.py
deleted file mode 100644
index dab666e4d0..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/llm.py
+++ /dev/null
@@ -1,328 +0,0 @@
-import json
-from collections.abc import Generator
-from typing import Optional, Union, cast
-
-import requests
-
-from core.model_runtime.entities.common_entities import I18nObject
-from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    ImagePromptMessageContent,
-    PromptMessage,
-    PromptMessageContent,
-    PromptMessageContentType,
-    PromptMessageTool,
-    SystemPromptMessage,
-    ToolPromptMessage,
-    UserPromptMessage,
-)
-from core.model_runtime.entities.model_entities import (
-    AIModelEntity,
-    FetchFrom,
-    ModelFeature,
-    ModelPropertyKey,
-    ModelType,
-    ParameterRule,
-    ParameterType,
-)
-from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel
-
-
-class StepfunLargeLanguageModel(OAIAPICompatLargeLanguageModel):
-    def _invoke(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        model_parameters: dict,
-        tools: Optional[list[PromptMessageTool]] = None,
-        stop: Optional[list[str]] = None,
-        stream: bool = True,
-        user: Optional[str] = None,
-    ) -> Union[LLMResult, Generator]:
-        self._add_custom_parameters(credentials)
-        self._add_function_call(model, credentials)
-        user = user[:32] if user else None
-        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        self._add_custom_parameters(credentials)
-        super().validate_credentials(model, credentials)
-
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
-        return AIModelEntity(
-            model=model,
-            label=I18nObject(en_US=model, zh_Hans=model),
-            model_type=ModelType.LLM,
-            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL]
-            if credentials.get("function_calling_type") == "tool_call"
-            else [],
-            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_properties={
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)),
-                ModelPropertyKey.MODE: LLMMode.CHAT.value,
-            },
-            parameter_rules=[
-                ParameterRule(
-                    name="temperature",
-                    use_template="temperature",
-                    label=I18nObject(en_US="Temperature", zh_Hans="温度"),
-                    type=ParameterType.FLOAT,
-                ),
-                ParameterRule(
-                    name="max_tokens",
-                    use_template="max_tokens",
-                    default=512,
-                    min=1,
-                    max=int(credentials.get("max_tokens", 1024)),
-                    label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"),
-                    type=ParameterType.INT,
-                ),
-                ParameterRule(
-                    name="top_p",
-                    use_template="top_p",
-                    label=I18nObject(en_US="Top P", zh_Hans="Top P"),
-                    type=ParameterType.FLOAT,
-                ),
-            ],
-        )
-
-    def _add_custom_parameters(self, credentials: dict) -> None:
-        credentials["mode"] = "chat"
-        credentials["endpoint_url"] = "https://api.stepfun.com/v1"
-
-    def _add_function_call(self, model: str, credentials: dict) -> None:
-        model_schema = self.get_model_schema(model, credentials)
-        if model_schema and {ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL}.intersection(
-            model_schema.features or []
-        ):
-            credentials["function_calling_type"] = "tool_call"
-
-    def _convert_prompt_message_to_dict(self, message: PromptMessage, credentials: Optional[dict] = None) -> dict:
-        """
-        Convert PromptMessage to dict for OpenAI API format
-        """
-        if isinstance(message, UserPromptMessage):
-            message = cast(UserPromptMessage, message)
-            if isinstance(message.content, str):
-                message_dict = {"role": "user", "content": message.content}
-            else:
-                sub_messages = []
-                for message_content in message.content:
-                    if message_content.type == PromptMessageContentType.TEXT:
-                        message_content = cast(PromptMessageContent, message_content)
-                        sub_message_dict = {"type": "text", "text": message_content.data}
-                        sub_messages.append(sub_message_dict)
-                    elif message_content.type == PromptMessageContentType.IMAGE:
-                        message_content = cast(ImagePromptMessageContent, message_content)
-                        sub_message_dict = {
-                            "type": "image_url",
-                            "image_url": {
-                                "url": message_content.data,
-                            },
-                        }
-                        sub_messages.append(sub_message_dict)
-                message_dict = {"role": "user", "content": sub_messages}
-        elif isinstance(message, AssistantPromptMessage):
-            message = cast(AssistantPromptMessage, message)
-            message_dict = {"role": "assistant", "content": message.content}
-            if message.tool_calls:
-                message_dict["tool_calls"] = []
-                for function_call in message.tool_calls:
-                    message_dict["tool_calls"].append(
-                        {
-                            "id": function_call.id,
-                            "type": function_call.type,
-                            "function": {
-                                "name": function_call.function.name,
-                                "arguments": function_call.function.arguments,
-                            },
-                        }
-                    )
-        elif isinstance(message, ToolPromptMessage):
-            message = cast(ToolPromptMessage, message)
-            message_dict = {"role": "tool", "content": message.content, "tool_call_id": message.tool_call_id}
-        elif isinstance(message, SystemPromptMessage):
-            message = cast(SystemPromptMessage, message)
-            message_dict = {"role": "system", "content": message.content}
-        else:
-            raise ValueError(f"Got unknown type {message}")
-
-        if message.name:
-            message_dict["name"] = message.name
-
-        return message_dict
-
-    def _extract_response_tool_calls(self, response_tool_calls: list[dict]) -> list[AssistantPromptMessage.ToolCall]:
-        """
-        Extract tool calls from response
-
-        :param response_tool_calls: response tool calls
-        :return: list of tool calls
-        """
-        tool_calls = []
-        if response_tool_calls:
-            for response_tool_call in response_tool_calls:
-                function = AssistantPromptMessage.ToolCall.ToolCallFunction(
-                    name=response_tool_call["function"]["name"]
-                    if response_tool_call.get("function", {}).get("name")
-                    else "",
-                    arguments=response_tool_call["function"]["arguments"]
-                    if response_tool_call.get("function", {}).get("arguments")
-                    else "",
-                )
-
-                tool_call = AssistantPromptMessage.ToolCall(
-                    id=response_tool_call["id"] if response_tool_call.get("id") else "",
-                    type=response_tool_call["type"] if response_tool_call.get("type") else "",
-                    function=function,
-                )
-                tool_calls.append(tool_call)
-
-        return tool_calls
-
-    def _handle_generate_stream_response(
-        self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage]
-    ) -> Generator:
-        """
-        Handle llm stream response
-
-        :param model: model name
-        :param credentials: model credentials
-        :param response: streamed response
-        :param prompt_messages: prompt messages
-        :return: llm response chunk generator
-        """
-        full_assistant_content = ""
-        chunk_index = 0
-
-        def create_final_llm_result_chunk(
-            index: int, message: AssistantPromptMessage, finish_reason: str
-        ) -> LLMResultChunk:
-            # calculate num tokens
-            prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content)
-            completion_tokens = self._num_tokens_from_string(model, full_assistant_content)
-
-            # transform usage
-            usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
-
-            return LLMResultChunk(
-                model=model,
-                prompt_messages=prompt_messages,
-                delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
-            )
-
-        tools_calls: list[AssistantPromptMessage.ToolCall] = []
-        finish_reason = "Unknown"
-
-        def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
-            def get_tool_call(tool_name: str):
-                if not tool_name:
-                    return tools_calls[-1]
-
-                tool_call = next((tool_call for tool_call in tools_calls if tool_call.function.name == tool_name), None)
-                if tool_call is None:
-                    tool_call = AssistantPromptMessage.ToolCall(
-                        id="",
-                        type="",
-                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tool_name, arguments=""),
-                    )
-                    tools_calls.append(tool_call)
-
-                return tool_call
-
-            for new_tool_call in new_tool_calls:
-                # get tool call
-                tool_call = get_tool_call(new_tool_call.function.name)
-                # update tool call
-                if new_tool_call.id:
-                    tool_call.id = new_tool_call.id
-                if new_tool_call.type:
-                    tool_call.type = new_tool_call.type
-                if new_tool_call.function.name:
-                    tool_call.function.name = new_tool_call.function.name
-                if new_tool_call.function.arguments:
-                    tool_call.function.arguments += new_tool_call.function.arguments
-
-        for chunk in response.iter_lines(decode_unicode=True, delimiter="\n\n"):
-            if chunk:
-                # ignore sse comments
-                if chunk.startswith(":"):
-                    continue
-                decoded_chunk = chunk.strip().lstrip("data: ").lstrip()
-                chunk_json = None
-                try:
-                    chunk_json = json.loads(decoded_chunk)
-                # stream ended
-                except json.JSONDecodeError as e:
-                    yield create_final_llm_result_chunk(
-                        index=chunk_index + 1,
-                        message=AssistantPromptMessage(content=""),
-                        finish_reason="Non-JSON encountered.",
-                    )
-                    break
-                if not chunk_json or len(chunk_json["choices"]) == 0:
-                    continue
-
-                choice = chunk_json["choices"][0]
-                finish_reason = chunk_json["choices"][0].get("finish_reason")
-                chunk_index += 1
-
-                if "delta" in choice:
-                    delta = choice["delta"]
-                    delta_content = delta.get("content")
-
-                    assistant_message_tool_calls = delta.get("tool_calls", None)
-                    # assistant_message_function_call = delta.delta.function_call
-
-                    # extract tool calls from response
-                    if assistant_message_tool_calls:
-                        tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
-                        increase_tool_call(tool_calls)
-
-                    if delta_content is None or delta_content == "":
-                        continue
-
-                    # transform assistant message to prompt message
-                    assistant_prompt_message = AssistantPromptMessage(
-                        content=delta_content, tool_calls=tool_calls if assistant_message_tool_calls else []
-                    )
-
-                    full_assistant_content += delta_content
-                elif "text" in choice:
-                    choice_text = choice.get("text", "")
-                    if choice_text == "":
-                        continue
-
-                    # transform assistant message to prompt message
-                    assistant_prompt_message = AssistantPromptMessage(content=choice_text)
-                    full_assistant_content += choice_text
-                else:
-                    continue
-
-                # check payload indicator for completion
-                yield LLMResultChunk(
-                    model=model,
-                    prompt_messages=prompt_messages,
-                    delta=LLMResultChunkDelta(
-                        index=chunk_index,
-                        message=assistant_prompt_message,
-                    ),
-                )
-
-                chunk_index += 1
-
-        if tools_calls:
-            yield LLMResultChunk(
-                model=model,
-                prompt_messages=prompt_messages,
-                delta=LLMResultChunkDelta(
-                    index=chunk_index,
-                    message=AssistantPromptMessage(tool_calls=tools_calls, content=""),
-                ),
-            )
-
-        yield create_final_llm_result_chunk(
-            index=chunk_index, message=AssistantPromptMessage(content=""), finish_reason=finish_reason
-        )
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-128k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-128k.yaml
deleted file mode 100644
index 13f7b7fd26..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1-128k.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-model: step-1-128k
-label:
-  zh_Hans: step-1-128k
-  en_US: step-1-128k
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 128000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 128000
-pricing:
-  input: '0.04'
-  output: '0.20'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-256k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-256k.yaml
deleted file mode 100644
index f80ec9851c..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1-256k.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-model: step-1-256k
-label:
-  zh_Hans: step-1-256k
-  en_US: step-1-256k
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 256000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 256000
-pricing:
-  input: '0.095'
-  output: '0.300'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-32k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-32k.yaml
deleted file mode 100644
index 96132d14a8..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1-32k.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-model: step-1-32k
-label:
-  zh_Hans: step-1-32k
-  en_US: step-1-32k
-model_type: llm
-features:
-  - agent-thought
-  - tool-call
-  - multi-tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 32000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 32000
-pricing:
-  input: '0.015'
-  output: '0.070'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-8k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-8k.yaml
deleted file mode 100644
index 4a4ba8d178..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1-8k.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-model: step-1-8k
-label:
-  zh_Hans: step-1-8k
-  en_US: step-1-8k
-model_type: llm
-features:
-  - agent-thought
-  - tool-call
-  - multi-tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 8000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 512
-    min: 1
-    max: 8000
-pricing:
-  input: '0.005'
-  output: '0.020'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml
deleted file mode 100644
index afb880f2a4..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1-flash.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-model: step-1-flash
-label:
-  zh_Hans: step-1-flash
-  en_US: step-1-flash
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 8000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 512
-    min: 1
-    max: 8000
-pricing:
-  input: '0.001'
-  output: '0.004'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml
deleted file mode 100644
index 08d6ad245d..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-32k.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-model: step-1v-32k
-label:
-  zh_Hans: step-1v-32k
-  en_US: step-1v-32k
-model_type: llm
-features:
-  - vision
-  - tool-call
-  - multi-tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 32000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 32000
-pricing:
-  input: '0.015'
-  output: '0.070'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml
deleted file mode 100644
index 843d14d9c6..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-1v-8k.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-model: step-1v-8k
-label:
-  zh_Hans: step-1v-8k
-  en_US: step-1v-8k
-model_type: llm
-features:
-  - vision
-  - tool-call
-  - multi-tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 8192
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 512
-    min: 1
-    max: 8192
-pricing:
-  input: '0.005'
-  output: '0.020'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml b/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml
deleted file mode 100644
index 6f2dabbfb0..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/llm/step-2-16k.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-model: step-2-16k
-label:
-  zh_Hans: step-2-16k
-  en_US: step-2-16k
-model_type: llm
-features:
-  - agent-thought
-  - tool-call
-  - multi-tool-call
-  - stream-tool-call
-model_properties:
-  mode: chat
-  context_size: 16000
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-  - name: top_p
-    use_template: top_p
-  - name: max_tokens
-    use_template: max_tokens
-    default: 1024
-    min: 1
-    max: 16000
-pricing:
-  input: '0.038'
-  output: '0.120'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/stepfun/stepfun.py b/api/core/model_runtime/model_providers/stepfun/stepfun.py
deleted file mode 100644
index e1c41a9153..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/stepfun.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import logging
-
-from core.model_runtime.entities.model_entities import ModelType
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class StepfunProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        """
-        Validate provider credentials
-        if validate failed, raise exception
-
-        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
-        """
-        try:
-            model_instance = self.get_model_instance(ModelType.LLM)
-
-            model_instance.validate_credentials(model="step-1-8k", credentials=credentials)
-        except CredentialsValidateFailedError as ex:
-            raise ex
-        except Exception as ex:
-            logger.exception(f"{self.get_provider_schema().provider} credentials validate failed")
-            raise ex
diff --git a/api/core/model_runtime/model_providers/stepfun/stepfun.yaml b/api/core/model_runtime/model_providers/stepfun/stepfun.yaml
deleted file mode 100644
index ccc8455adc..0000000000
--- a/api/core/model_runtime/model_providers/stepfun/stepfun.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-provider: stepfun
-label:
-  zh_Hans: 阶跃星辰
-  en_US: Stepfun
-description:
-  en_US: Models provided by stepfun, such as step-1-8k, step-1-32k、step-1v-8k、step-1v-32k, step-1-128k and step-1-256k
-  zh_Hans: 阶跃星辰提供的模型,例如 step-1-8k、step-1-32k、step-1v-8k、step-1v-32k、step-1-128k 和 step-1-256k。
-icon_small:
-  en_US: icon_s_en.png
-icon_large:
-  en_US: icon_l_en.png
-background: "#FFFFFF"
-help:
-  title:
-    en_US: Get your API Key from stepfun
-    zh_Hans: 从 stepfun 获取 API Key
-  url:
-    en_US: https://platform.stepfun.com/interface-key
-supported_model_types:
-  - llm
-configurate_methods:
-  - predefined-model
-  - customizable-model
-provider_credential_schema:
-  credential_form_schemas:
-    - variable: api_key
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 API Key
-        en_US: Enter your API Key
-model_credential_schema:
-  model:
-    label:
-      en_US: Model Name
-      zh_Hans: 模型名称
-    placeholder:
-      en_US: Enter your model name
-      zh_Hans: 输入模型名称
-  credential_form_schemas:
-    - variable: api_key
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 API Key
-        en_US: Enter your API Key
-    - variable: context_size
-      label:
-        zh_Hans: 模型上下文长度
-        en_US: Model context size
-      required: true
-      type: text-input
-      default: '8192'
-      placeholder:
-        zh_Hans: 在此输入您的模型上下文长度
-        en_US: Enter your Model context size
-    - variable: max_tokens
-      label:
-        zh_Hans: 最大 token 上限
-        en_US: Upper bound for max tokens
-      default: '8192'
-      type: text-input
-    - variable: function_calling_type
-      label:
-        en_US: Function calling
-      type: select
-      required: false
-      default: no_call
-      options:
-        - value: no_call
-          label:
-            en_US: Not supported
-            zh_Hans: 不支持
-        - value: tool_call
-          label:
-            en_US: Tool Call
-            zh_Hans: Tool Call
diff --git a/api/core/model_runtime/model_providers/tencent/__init__.py b/api/core/model_runtime/model_providers/tencent/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/tencent/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/tencent/_assets/icon_l_en.svg
deleted file mode 100644
index 63c7c8f988..0000000000
--- a/api/core/model_runtime/model_providers/tencent/_assets/icon_l_en.svg
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-    tencent-cloud
-
-
-
\ No newline at end of file
diff --git a/api/core/model_runtime/model_providers/tencent/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/tencent/_assets/icon_l_zh.svg
deleted file mode 100644
index 63c7c8f988..0000000000
--- a/api/core/model_runtime/model_providers/tencent/_assets/icon_l_zh.svg
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
-
-
-    tencent-cloud
-
-
-
\ No newline at end of file
diff --git a/api/core/model_runtime/model_providers/tencent/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/tencent/_assets/icon_s_en.svg
deleted file mode 100644
index a3299b9201..0000000000
--- a/api/core/model_runtime/model_providers/tencent/_assets/icon_s_en.svg
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-
-    tencent-cloud
-
-
\ No newline at end of file
diff --git a/api/core/model_runtime/model_providers/tencent/speech2text/__init__.py b/api/core/model_runtime/model_providers/tencent/speech2text/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py b/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py
deleted file mode 100644
index c3c21793e8..0000000000
--- a/api/core/model_runtime/model_providers/tencent/speech2text/flash_recognizer.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import base64
-import hashlib
-import hmac
-import operator
-import time
-
-import requests
-
-
-class Credential:
-    def __init__(self, secret_id, secret_key):
-        self.secret_id = secret_id
-        self.secret_key = secret_key
-
-
-class FlashRecognitionRequest:
-    def __init__(self, voice_format="mp3", engine_type="16k_zh"):
-        self.engine_type = engine_type
-        self.speaker_diarization = 0
-        self.hotword_id = ""
-        self.customization_id = ""
-        self.filter_dirty = 0
-        self.filter_modal = 0
-        self.filter_punc = 0
-        self.convert_num_mode = 1
-        self.word_info = 0
-        self.voice_format = voice_format
-        self.first_channel_only = 1
-        self.reinforce_hotword = 0
-        self.sentence_max_length = 0
-
-    def set_first_channel_only(self, first_channel_only):
-        self.first_channel_only = first_channel_only
-
-    def set_speaker_diarization(self, speaker_diarization):
-        self.speaker_diarization = speaker_diarization
-
-    def set_filter_dirty(self, filter_dirty):
-        self.filter_dirty = filter_dirty
-
-    def set_filter_modal(self, filter_modal):
-        self.filter_modal = filter_modal
-
-    def set_filter_punc(self, filter_punc):
-        self.filter_punc = filter_punc
-
-    def set_convert_num_mode(self, convert_num_mode):
-        self.convert_num_mode = convert_num_mode
-
-    def set_word_info(self, word_info):
-        self.word_info = word_info
-
-    def set_hotword_id(self, hotword_id):
-        self.hotword_id = hotword_id
-
-    def set_customization_id(self, customization_id):
-        self.customization_id = customization_id
-
-    def set_voice_format(self, voice_format):
-        self.voice_format = voice_format
-
-    def set_sentence_max_length(self, sentence_max_length):
-        self.sentence_max_length = sentence_max_length
-
-    def set_reinforce_hotword(self, reinforce_hotword):
-        self.reinforce_hotword = reinforce_hotword
-
-
-class FlashRecognizer:
-    """
-    response:
-        request_id  string
-        status  Integer
-        message  String
-        audio_duration  Integer
-        flash_result  Result Array
-
-    Result:
-        text  String
-        channel_id  Integer
-        sentence_list  Sentence Array
-
-    Sentence:
-        text  String
-        start_time  Integer
-        end_time  Integer
-        speaker_id  Integer
-        word_list  Word Array
-
-    Word:
-        word  String
-        start_time  Integer
-        end_time  Integer
-        stable_flag:  Integer
-    """
-
-    def __init__(self, appid, credential):
-        self.credential = credential
-        self.appid = appid
-
-    def _format_sign_string(self, param):
-        signstr = "POSTasr.cloud.tencent.com/asr/flash/v1/"
-        for t in param:
-            if "appid" in t:
-                signstr += str(t[1])
-                break
-        signstr += "?"
-        for x in param:
-            tmp = x
-            if "appid" in x:
-                continue
-            for t in tmp:
-                signstr += str(t)
-                signstr += "="
-            signstr = signstr[:-1]
-            signstr += "&"
-        signstr = signstr[:-1]
-        return signstr
-
-    def _build_header(self):
-        header = {"Host": "asr.cloud.tencent.com"}
-        return header
-
-    def _sign(self, signstr, secret_key):
-        hmacstr = hmac.new(secret_key.encode("utf-8"), signstr.encode("utf-8"), hashlib.sha1).digest()
-        s = base64.b64encode(hmacstr)
-        s = s.decode("utf-8")
-        return s
-
-    def _build_req_with_signature(self, secret_key, params, header):
-        query = sorted(params.items(), key=operator.itemgetter(0))
-        signstr = self._format_sign_string(query)
-        signature = self._sign(signstr, secret_key)
-        header["Authorization"] = signature
-        req_url = "https://"
-        req_url += signstr[4::]
-        return req_url
-
-    def _create_query_arr(self, req):
-        return {
-            "appid": self.appid,
-            "secretid": self.credential.secret_id,
-            "timestamp": str(int(time.time())),
-            "engine_type": req.engine_type,
-            "voice_format": req.voice_format,
-            "speaker_diarization": req.speaker_diarization,
-            "hotword_id": req.hotword_id,
-            "customization_id": req.customization_id,
-            "filter_dirty": req.filter_dirty,
-            "filter_modal": req.filter_modal,
-            "filter_punc": req.filter_punc,
-            "convert_num_mode": req.convert_num_mode,
-            "word_info": req.word_info,
-            "first_channel_only": req.first_channel_only,
-            "reinforce_hotword": req.reinforce_hotword,
-            "sentence_max_length": req.sentence_max_length,
-        }
-
-    def recognize(self, req, data):
-        header = self._build_header()
-        query_arr = self._create_query_arr(req)
-        req_url = self._build_req_with_signature(self.credential.secret_key, query_arr, header)
-        r = requests.post(req_url, headers=header, data=data)
-        return r.text
diff --git a/api/core/model_runtime/model_providers/tencent/speech2text/speech2text.py b/api/core/model_runtime/model_providers/tencent/speech2text/speech2text.py
deleted file mode 100644
index 5b427663ca..0000000000
--- a/api/core/model_runtime/model_providers/tencent/speech2text/speech2text.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import json
-from typing import IO, Optional
-
-import requests
-
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeConnectionError,
-    InvokeError,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel
-from core.model_runtime.model_providers.tencent.speech2text.flash_recognizer import (
-    Credential,
-    FlashRecognitionRequest,
-    FlashRecognizer,
-)
-
-
-class TencentSpeech2TextModel(Speech2TextModel):
-    def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str:
-        """
-        Invoke speech2text model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param file: audio file
-        :param user: unique user id
-        :return: text for given audio file
-        """
-        return self._speech2text_invoke(model, credentials, file)
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        """
-        Validate model credentials
-
-        :param model: model name
-        :param credentials: model credentials
-        :return:
-        """
-        try:
-            audio_file_path = self._get_demo_file_path()
-
-            with open(audio_file_path, "rb") as audio_file:
-                self._speech2text_invoke(model, credentials, audio_file)
-        except Exception as ex:
-            raise CredentialsValidateFailedError(str(ex))
-
-    def _speech2text_invoke(self, model: str, credentials: dict, file: IO[bytes]) -> str:
-        """
-        Invoke speech2text model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param file: audio file
-        :return: text for given audio file
-        """
-        app_id = credentials["app_id"]
-        secret_id = credentials["secret_id"]
-        secret_key = credentials["secret_key"]
-        voice_format = file.voice_format if hasattr(file, "voice_format") else "mp3"
-        tencent_voice_recognizer = FlashRecognizer(app_id, Credential(secret_id, secret_key))
-        resp = tencent_voice_recognizer.recognize(FlashRecognitionRequest(voice_format), file)
-        resp = json.loads(resp)
-        code = resp["code"]
-        message = resp["message"]
-        if code == 4002:
-            raise CredentialsValidateFailedError(str(message))
-        elif code != 0:
-            return f"Tencent ASR Recognition failed with code {code} and message {message}"
-        return "\n".join(item["text"] for item in resp["flash_result"])
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [requests.exceptions.ConnectionError],
-            InvokeAuthorizationError: [CredentialsValidateFailedError],
-        }
diff --git a/api/core/model_runtime/model_providers/tencent/speech2text/tencent.yaml b/api/core/model_runtime/model_providers/tencent/speech2text/tencent.yaml
deleted file mode 100644
index 618d19ac7c..0000000000
--- a/api/core/model_runtime/model_providers/tencent/speech2text/tencent.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-model: tencent
-model_type: speech2text
-model_properties:
-  file_upload_limit: 25
-  supported_file_extensions: flac,mp3,mp4,mpeg,mpga,m4a,ogg,wav,webm
diff --git a/api/core/model_runtime/model_providers/tencent/tencent.py b/api/core/model_runtime/model_providers/tencent/tencent.py
deleted file mode 100644
index 79c6f577b8..0000000000
--- a/api/core/model_runtime/model_providers/tencent/tencent.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import logging
-
-from core.model_runtime.entities.model_entities import ModelType
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class TencentProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        """
-        Validate provider credentials
-
-        if validate failed, raise exception
-
-        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
- """ - try: - model_instance = self.get_model_instance(ModelType.SPEECH2TEXT) - model_instance.validate_credentials(model="tencent", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/tencent/tencent.yaml b/api/core/model_runtime/model_providers/tencent/tencent.yaml deleted file mode 100644 index 7d8d5a1866..0000000000 --- a/api/core/model_runtime/model_providers/tencent/tencent.yaml +++ /dev/null @@ -1,49 +0,0 @@ -provider: tencent -label: - zh_Hans: 腾讯云 - en_US: Tencent -icon_small: - en_US: icon_s_en.svg -icon_large: - zh_Hans: icon_l_zh.svg - en_US: icon_l_en.svg -background: "#E5E7EB" -help: - title: - en_US: Get your API key from Tencent AI - zh_Hans: 从腾讯云获取 API Key - url: - en_US: https://cloud.tencent.com/product/asr -supported_model_types: - - speech2text -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: app_id - label: - zh_Hans: APPID - en_US: APPID - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的腾讯语音识别服务的 APPID - en_US: Enter the APPID of your Tencent Cloud ASR service - - variable: secret_id - label: - zh_Hans: SecretId - en_US: SecretId - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的腾讯语音识别服务的 SecretId - en_US: Enter the SecretId of your Tencent Cloud ASR service - - variable: secret_key - label: - zh_Hans: SecretKey - en_US: SecretKey - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的腾讯语音识别服务的 SecretKey - en_US: Enter the SecretKey of your Tencent Cloud ASR service diff --git a/api/core/model_runtime/model_providers/togetherai/__init__.py b/api/core/model_runtime/model_providers/togetherai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/togetherai/_assets/togetherai.svg b/api/core/model_runtime/model_providers/togetherai/_assets/togetherai.svg deleted file mode 100644 index e9d918b15e..0000000000 --- a/api/core/model_runtime/model_providers/togetherai/_assets/togetherai.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/togetherai/_assets/togetherai_square.svg b/api/core/model_runtime/model_providers/togetherai/_assets/togetherai_square.svg deleted file mode 100644 index 16bae5235f..0000000000 --- a/api/core/model_runtime/model_providers/togetherai/_assets/togetherai_square.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/api/core/model_runtime/model_providers/togetherai/llm/__init__.py b/api/core/model_runtime/model_providers/togetherai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/togetherai/llm/llm.py b/api/core/model_runtime/model_providers/togetherai/llm/llm.py deleted file mode 100644 index b96d43979e..0000000000 --- a/api/core/model_runtime/model_providers/togetherai/llm/llm.py +++ /dev/null @@ -1,170 +0,0 @@ -from collections.abc import Generator -from decimal import Decimal -from typing import Optional, Union - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult -from core.model_runtime.entities.message_entities import ( - PromptMessage, - PromptMessageTool, -) -from core.model_runtime.entities.model_entities 
import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, - PriceConfig, -) -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class TogetherAILargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _update_endpoint_url(self, credentials: dict): - credentials["endpoint_url"] = "https://api.together.xyz/v1" - return credentials - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super()._invoke(model, cred_with_endpoint, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super().validate_credentials(model, cred_with_endpoint) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super()._generate( - model, cred_with_endpoint, prompt_messages, model_parameters, tools, stop, stream, user - ) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - REPETITION_PENALTY = "repetition_penalty" - TOP_K = "top_k" - features = [] - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.LLM, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - features=features, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(cred_with_endpoint.get("context_size", "4096")), - ModelPropertyKey.MODE: cred_with_endpoint.get("mode"), - }, - parameter_rules=[ - ParameterRule( - name=DefaultParameterName.TEMPERATURE.value, - label=I18nObject(en_US="Temperature"), - type=ParameterType.FLOAT, - default=float(cred_with_endpoint.get("temperature", 0.7)), - min=0, - max=2, - precision=2, - ), - ParameterRule( - name=DefaultParameterName.TOP_P.value, - label=I18nObject(en_US="Top P"), - type=ParameterType.FLOAT, - default=float(cred_with_endpoint.get("top_p", 1)), - min=0, - max=1, - precision=2, - ), - ParameterRule( - name=TOP_K, - label=I18nObject(en_US="Top K"), - type=ParameterType.INT, - default=int(cred_with_endpoint.get("top_k", 50)), - min=-2147483647, - max=2147483647, - precision=0, - ), - ParameterRule( - name=REPETITION_PENALTY, - label=I18nObject(en_US="Repetition Penalty"), - type=ParameterType.FLOAT, - default=float(cred_with_endpoint.get("repetition_penalty", 1)), - min=-3.4, - max=3.4, - precision=1, - ), - ParameterRule( - name=DefaultParameterName.MAX_TOKENS.value, - label=I18nObject(en_US="Max Tokens"), - type=ParameterType.INT, - default=512, - min=1, - max=int(cred_with_endpoint.get("max_tokens_to_sample", 4096)), - ), - ParameterRule( - name=DefaultParameterName.FREQUENCY_PENALTY.value, - label=I18nObject(en_US="Frequency Penalty"), - type=ParameterType.FLOAT, - 
default=float(cred_with_endpoint.get("frequency_penalty", 0)), - min=-2, - max=2, - ), - ParameterRule( - name=DefaultParameterName.PRESENCE_PENALTY.value, - label=I18nObject(en_US="Presence Penalty"), - type=ParameterType.FLOAT, - default=float(cred_with_endpoint.get("presence_penalty", 0)), - min=-2, - max=2, - ), - ], - pricing=PriceConfig( - input=Decimal(cred_with_endpoint.get("input_price", 0)), - output=Decimal(cred_with_endpoint.get("output_price", 0)), - unit=Decimal(cred_with_endpoint.get("unit", 0)), - currency=cred_with_endpoint.get("currency", "USD"), - ), - ) - - if cred_with_endpoint["mode"] == "chat": - entity.model_properties[ModelPropertyKey.MODE] = LLMMode.CHAT.value - elif cred_with_endpoint["mode"] == "completion": - entity.model_properties[ModelPropertyKey.MODE] = LLMMode.COMPLETION.value - else: - raise ValueError(f"Unknown completion mode {cred_with_endpoint['mode']}") - - return entity - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - cred_with_endpoint = self._update_endpoint_url(credentials=credentials) - - return super().get_num_tokens(model, cred_with_endpoint, prompt_messages, tools) diff --git a/api/core/model_runtime/model_providers/togetherai/togetherai.py b/api/core/model_runtime/model_providers/togetherai/togetherai.py deleted file mode 100644 index aa4100a7c9..0000000000 --- a/api/core/model_runtime/model_providers/togetherai/togetherai.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class TogetherAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/togetherai/togetherai.yaml b/api/core/model_runtime/model_providers/togetherai/togetherai.yaml deleted file mode 100644 index e69471b15d..0000000000 --- a/api/core/model_runtime/model_providers/togetherai/togetherai.yaml +++ /dev/null @@ -1,75 +0,0 @@ -provider: togetherai -label: - en_US: together.ai -icon_small: - en_US: togetherai_square.svg -icon_large: - en_US: togetherai.svg -background: "#F1EFED" -help: - title: - en_US: Get your API key from together.ai - zh_Hans: 从 together.ai 获取 API Key - url: - en_US: https://api.together.xyz/ -supported_model_types: - - llm -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter full model name - zh_Hans: 输入模型全称 - credential_form_schemas: - - variable: api_key - required: true - label: - en_US: API Key - type: secret-input - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: mode - show_on: - - variable: __model_type - value: llm - label: - en_US: Completion mode - type: select - required: false - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select completion mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - label: - zh_Hans: 模型上下文长度 - en_US: Model context size - required: true - type: text-input - default: '4096' - placeholder: - zh_Hans: 在此输入您的模型上下文长度 - en_US: Enter your Model context size - - variable: max_tokens_to_sample - label: - zh_Hans: 最大 token 上限 - en_US: Upper bound for max tokens - show_on: - - variable: __model_type - value: llm - default: '4096' - type:
text-input diff --git a/api/core/model_runtime/model_providers/tongyi/__init__.py b/api/core/model_runtime/model_providers/tongyi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_en.png deleted file mode 100644 index 94de01136a..0000000000 Binary files a/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_zh.png b/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_zh.png deleted file mode 100644 index bd8f2762d1..0000000000 Binary files a/api/core/model_runtime/model_providers/tongyi/_assets/icon_l_zh.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/tongyi/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/tongyi/_assets/icon_s_en.png deleted file mode 100644 index c1aff40ee0..0000000000 Binary files a/api/core/model_runtime/model_providers/tongyi/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/tongyi/_common.py b/api/core/model_runtime/model_providers/tongyi/_common.py deleted file mode 100644 index 8a50c7aa05..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/_common.py +++ /dev/null @@ -1,55 +0,0 @@ -from dashscope.common.error import ( - AuthenticationError, - InvalidParameter, - RequestFailure, - ServiceUnavailableError, - UnsupportedHTTPMethod, - UnsupportedModel, -) - -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonTongyi: - @staticmethod - def _to_credential_kwargs(credentials: dict) -> dict: - credentials_kwargs = { - "dashscope_api_key": credentials["dashscope_api_key"], - } - - return credentials_kwargs - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - RequestFailure, - ], - InvokeServerUnavailableError: [ - ServiceUnavailableError, - ], - InvokeRateLimitError: [], - InvokeAuthorizationError: [ - AuthenticationError, - ], - InvokeBadRequestError: [ - InvalidParameter, - UnsupportedModel, - UnsupportedHTTPMethod, - ], - } diff --git a/api/core/model_runtime/model_providers/tongyi/llm/__init__.py b/api/core/model_runtime/model_providers/tongyi/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/tongyi/llm/_position.yaml b/api/core/model_runtime/model_providers/tongyi/llm/_position.yaml deleted file mode 100644 index 8ce336d60c..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/_position.yaml +++ /dev/null @@ -1,51 +0,0 @@ -- qwen-vl-max-0809 -- qwen-vl-max-0201 -- qwen-vl-max -- qwen-max-latest -- qwen-max-1201 -- qwen-max-0919 -- qwen-max-0428 -- qwen-max-0403 -- qwen-max-0107 -- qwen-max -- qwen-max-longcontext -- qwen-plus-latest -- qwen-plus-0919 -- qwen-plus-0806 -- qwen-plus-0723 -- qwen-plus-0624 -- qwen-plus-0206 -- qwen-plus-chat -- qwen-plus -- qwen-vl-plus-0809 -- qwen-vl-plus-0201 -- qwen-vl-plus -- qwen-turbo-latest -- qwen-turbo-0919 -- qwen-turbo-0624 -- qwen-turbo-0206 -- qwen-turbo-chat -- qwen-turbo -- qwen2.5-72b-instruct -- qwen2.5-32b-instruct -- qwen2.5-14b-instruct -- qwen2.5-7b-instruct -- qwen2.5-3b-instruct -- qwen2.5-1.5b-instruct -- qwen2.5-0.5b-instruct -- qwen2.5-coder-7b-instruct -- qwen2-math-72b-instruct -- qwen2-math-7b-instruct -- qwen2-math-1.5b-instruct -- qwen-long -- qwen-math-plus-latest -- qwen-math-plus-0919 -- qwen-math-plus-0816 -- qwen-math-plus -- qwen-math-turbo-latest -- qwen-math-turbo-0919 -- qwen-math-turbo -- qwen-coder-turbo-latest -- qwen-coder-turbo-0919 -- qwen-coder-turbo -- farui-plus diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml deleted file mode 100644 index 34a57d1fc0..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: farui-plus -label: - en_US: farui-plus -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 12288 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
- - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.02' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py deleted file mode 100644 index 3e3585b30a..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py +++ /dev/null @@ -1,593 +0,0 @@ -import base64 -import os -import tempfile -import uuid -from collections.abc import Generator -from http import HTTPStatus -from pathlib import Path -from typing import Optional, Union, cast - -from dashscope import Generation, MultiModalConversation, get_tokenizer -from dashscope.api_entities.dashscope_response import GenerationResponse -from dashscope.common.error import ( - AuthenticationError, - InvalidParameter, - RequestFailure, - ServiceUnavailableError, - UnsupportedHTTPMethod, - UnsupportedModel, -) - -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - I18nObject, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - - -class TongyiLargeLanguageModel(LargeLanguageModel): - tokenizers = {} - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke model without code wrapper - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # Check if the model was added via get_customizable_model_schema - if self.get_customizable_model_schema(model, credentials) is not None: - # For custom models, tokens are not calculated. 
- return 0 - - if model in {"qwen-turbo-chat", "qwen-plus-chat"}: - model = model.replace("-chat", "") - if model == "farui-plus": - model = "qwen-farui-plus" - - if model in self.tokenizers: - tokenizer = self.tokenizers[model] - else: - tokenizer = get_tokenizer(model) - self.tokenizers[model] = tokenizer - - # convert string to token ids - tokens = tokenizer.encode(self._convert_messages_to_prompt(prompt_messages)) - - return len(tokens) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._generate( - model=model, - credentials=credentials, - prompt_messages=[ - UserPromptMessage(content="ping"), - ], - model_parameters={ - "temperature": 0.5, - }, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - mode = self.get_model_mode(model, credentials) - - if model in {"qwen-turbo-chat", "qwen-plus-chat"}: - model = model.replace("-chat", "") - - extra_model_kwargs = {} - if tools: - extra_model_kwargs["tools"] = self._convert_tools(tools) - - if stop: - extra_model_kwargs["stop"] = stop - - params = { - "model": model, - **model_parameters, - **credentials_kwargs, - **extra_model_kwargs, - } - - model_schema = self.get_model_schema(model, credentials) - if ModelFeature.VISION in (model_schema.features or []): - params["messages"] = self._convert_prompt_messages_to_tongyi_messages(prompt_messages, rich_content=True) - - response = MultiModalConversation.call(**params, stream=stream) - else: - # nothing different between chat model and completion model in tongyi - params["messages"] = self._convert_prompt_messages_to_tongyi_messages(prompt_messages) - response = Generation.call(**params, result_format="message", stream=stream) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: GenerationResponse, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - if response.status_code not in {200, HTTPStatus.OK}: - raise ServiceUnavailableError(response.message) - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=response.output.choices[0].message.content, - ) - - # transform usage - usage = self._calc_response_usage(model, 
credentials, response.usage.input_tokens, response.usage.output_tokens) - - # transform response - result = LLMResult( - model=model, - message=assistant_prompt_message, - prompt_messages=prompt_messages, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, - model: str, - credentials: dict, - responses: Generator[GenerationResponse, None, None], - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: credentials - :param responses: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - full_text = "" - tool_calls = [] - for index, response in enumerate(responses): - if response.status_code not in {200, HTTPStatus.OK}: - raise ServiceUnavailableError( - f"Failed to invoke model {model}, status code: {response.status_code}, " - f"message: {response.message}" - ) - - resp_finish_reason = response.output.choices[0].finish_reason - - if resp_finish_reason is not None and resp_finish_reason != "null": - resp_content = response.output.choices[0].message.content - - assistant_prompt_message = AssistantPromptMessage( - content="", - ) - - if "tool_calls" in response.output.choices[0].message: - tool_calls = response.output.choices[0].message["tool_calls"] - elif resp_content: - # special for qwen-vl - if isinstance(resp_content, list): - resp_content = resp_content[0]["text"] - - # transform assistant message to prompt message - assistant_prompt_message.content = resp_content.replace(full_text, "", 1) - - full_text = resp_content - - if tool_calls: - message_tool_calls = [] - for tool_call_obj in tool_calls: - message_tool_call = AssistantPromptMessage.ToolCall( - id=tool_call_obj["function"]["name"], - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_call_obj["function"]["name"], arguments=tool_call_obj["function"]["arguments"] - ), - ) - message_tool_calls.append(message_tool_call) - - assistant_prompt_message.tool_calls = message_tool_calls - - # transform usage - usage = response.usage - usage = self._calc_response_usage(model, credentials, usage.input_tokens, usage.output_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, message=assistant_prompt_message, finish_reason=resp_finish_reason, usage=usage - ), - ) - else: - resp_content = response.output.choices[0].message.content - if not resp_content: - if "tool_calls" in response.output.choices[0].message: - tool_calls = response.output.choices[0].message["tool_calls"] - continue - - # special for qwen-vl - if isinstance(resp_content, list): - resp_content = resp_content[0]["text"] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=resp_content.replace(full_text, "", 1), - ) - - full_text = resp_content - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message), - ) - - def _to_credential_kwargs(self, credentials: dict) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "api_key": credentials["dashscope_api_key"], - } - - return credentials_kwargs - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. 
- :return: String representation of the message. - """ - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - content = message.content - - if isinstance(message, UserPromptMessage): - if isinstance(content, str): - message_text = f"{human_prompt} {content}" - else: - message_text = "" - for sub_message in content: - if sub_message.type == PromptMessageContentType.TEXT: - message_text = f"{human_prompt} {sub_message.data}" - break - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage | ToolPromptMessage): - message_text = content - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a full prompt for the Tongyi model - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() - - def _convert_prompt_messages_to_tongyi_messages( - self, prompt_messages: list[PromptMessage], rich_content: bool = False - ) -> list[dict]: - """ - Convert prompt messages to tongyi messages - - :param prompt_messages: prompt messages - :return: tongyi messages - """ - tongyi_messages = [] - for prompt_message in prompt_messages: - if isinstance(prompt_message, SystemPromptMessage): - tongyi_messages.append( - { - "role": "system", - "content": prompt_message.content if not rich_content else [{"text": prompt_message.content}], - } - ) - elif isinstance(prompt_message, UserPromptMessage): - if isinstance(prompt_message.content, str): - tongyi_messages.append( - { - "role": "user", - "content": prompt_message.content - if not rich_content - else [{"text": prompt_message.content}], - } - ) - else: - sub_messages = [] - for message_content in prompt_message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - - image_url = message_content.data - if message_content.data.startswith("data:"): - # convert image base64 data to file in /tmp - image_url = self._save_base64_image_to_file(message_content.data) - - sub_message_dict = {"image": image_url} - sub_messages.append(sub_message_dict) - - # re-sort sub_messages so that text content always comes last - sub_messages = sorted(sub_messages, key=lambda x: "text" in x) - - tongyi_messages.append({"role": "user", "content": sub_messages}) - elif isinstance(prompt_message, AssistantPromptMessage): - content = prompt_message.content - if not content: - content = " " - message = {"role": "assistant", "content": content if not rich_content else [{"text": content}]} - if prompt_message.tool_calls: - message["tool_calls"] = [tool_call.model_dump() for tool_call in prompt_message.tool_calls] - tongyi_messages.append(message) - elif isinstance(prompt_message, ToolPromptMessage): - tongyi_messages.append( - {"role": "tool", "content": prompt_message.content, "name": prompt_message.tool_call_id} - ) - else: - raise ValueError(f"Got
unknown type {prompt_message}") - - return tongyi_messages - - def _save_base64_image_to_file(self, base64_image: str) -> str: - """ - Save base64 image to file - 'data:{upload_file.mime_type};base64,{encoded_string}' - - :param base64_image: base64 image data - :return: image file path - """ - # get mime type and encoded string - mime_type, encoded_string = base64_image.split(",")[0].split(";")[0].split(":")[1], base64_image.split(",")[1] - - # save image to file - temp_dir = tempfile.gettempdir() - - file_path = os.path.join(temp_dir, f"{uuid.uuid4()}.{mime_type.split('/')[1]}") - - Path(file_path).write_bytes(base64.b64decode(encoded_string)) - - return f"file://{file_path}" - - def _convert_tools(self, tools: list[PromptMessageTool]) -> list[dict]: - """ - Convert tools - """ - tool_definitions = [] - for tool in tools: - properties = tool.parameters["properties"] - required_properties = tool.parameters["required"] - - properties_definitions = {} - for p_key, p_val in properties.items(): - desc = p_val["description"] - if "enum" in p_val: - desc += f"; Only accepts one of the following predefined options: [{', '.join(p_val['enum'])}]" - - properties_definitions[p_key] = { - "description": desc, - "type": p_val["type"], - } - - tool_definition = { - "type": "function", - "function": { - "name": tool.name, - "description": tool.description, - "parameters": properties_definitions, - "required": required_properties, - }, - } - - tool_definitions.append(tool_definition) - - return tool_definitions - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - RequestFailure, - ], - InvokeServerUnavailableError: [ - ServiceUnavailableError, - ], - InvokeRateLimitError: [], - InvokeAuthorizationError: [ - AuthenticationError, - ], - InvokeBadRequestError: [ - InvalidParameter, - UnsupportedModel, - UnsupportedHTTPMethod, - ], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - Architecture for defining customizable models - - :param model: model name - :param credentials: model credentials - :return: AIModelEntity or None - """ - return AIModelEntity( - model=model, - label=I18nObject(en_US=model, zh_Hans=model), - model_type=ModelType.LLM, - features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL] - if credentials.get("function_calling_type") == "tool_call" - else [], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)), - ModelPropertyKey.MODE: LLMMode.CHAT.value, - }, - parameter_rules=[ - ParameterRule( - name="temperature", - use_template="temperature", - label=I18nObject(en_US="Temperature", zh_Hans="温度"), - type=ParameterType.FLOAT, - ), - ParameterRule( - name="max_tokens", - use_template="max_tokens", - default=512, - min=1, - max=int(credentials.get("max_tokens", 1024)), - label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"), - type=ParameterType.INT, - ), - ParameterRule( - name="top_p", - use_template="top_p", - label=I18nObject(en_US="Top P", zh_Hans="Top P"), - type=ParameterType.FLOAT, - ), - ParameterRule( - name="top_k", - use_template="top_k", - label=I18nObject(en_US="Top K", zh_Hans="Top K"), - type=ParameterType.FLOAT, - ), - ParameterRule( - name="frequency_penalty", - use_template="frequency_penalty", - label=I18nObject(en_US="Frequency Penalty", zh_Hans="重复惩罚"), - type=ParameterType.FLOAT, - ), - ], - ) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml deleted file mode 100644 index 64a3f33133..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-coder-turbo-0919 -label: - en_US: qwen-coder-turbo-0919 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
- - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml deleted file mode 100644 index a4c93f7047..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-coder-turbo-latest -label: - en_US: qwen-coder-turbo-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml deleted file mode 100644 index ff68faed80..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-coder-turbo -label: - en_US: qwen-coder-turbo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. 
The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml deleted file mode 100644 index c3dbb3616f..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-long -label: - en_US: qwen-long -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 10000000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 6000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. 
The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.0005' - output: '0.002' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml deleted file mode 100644 index 42fe1f6862..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-plus-0816 -label: - en_US: qwen-math-plus-0816 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. 
It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml deleted file mode 100644 index 9b6567b8cd..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-plus-0919 -label: - en_US: qwen-math-plus-0919 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml deleted file mode 100644 index b2a2393b36..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-plus-latest -label: - en_US: qwen-math-plus-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml deleted file mode 100644 index 63f4b7ff0a..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-plus -label: - en_US: qwen-math-plus -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. 
The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml deleted file mode 100644 index 4da90eec3e..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-turbo-0919 -label: - en_US: qwen-math-turbo-0919 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml deleted file mode 100644 index d29f8851dd..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-turbo-latest -label: - en_US: qwen-math-turbo-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
- - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml deleted file mode 100644 index 2a8f7f725e..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-math-turbo -label: - en_US: qwen-math-turbo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. 
Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 3072 - min: 1 - max: 3072 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml deleted file mode 100644 index ef1841b517..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-0107 -label: - en_US: qwen-max-0107 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.04' - output: '0.12' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml deleted file mode 100644 index a2ea5df130..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max-0403, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-0403 -label: - en_US: qwen-max-0403 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.04' - output: '0.12' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml deleted file mode 100644 index a467665f11..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max-0428, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-0428 -label: - en_US: qwen-max-0428 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.04' - output: '0.12' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml deleted file mode 100644 index 78661eaea0..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max-0919, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-0919 -label: - en_US: qwen-max-0919 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
- - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.06' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml deleted file mode 100644 index 6f4674576b..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-1201 -label: - en_US: qwen-max-1201 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.04' - output: '0.12' - unit: '0.001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml deleted file mode 100644 index 8b5f005473..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-latest -label: - en_US: qwen-max-latest -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. 
- - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.06' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml deleted file mode 100644 index 098494ff95..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# this model corresponds to qwen-max, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max-longcontext -label: - en_US: qwen-max-longcontext -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8000 - min: 1 - max: 8000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.04' - output: '0.12' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml deleted file mode 100644 index 9d0d3f8db3..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# this model corresponds to qwen-max, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf) -model: qwen-max -label: - en_US: qwen-max -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.06' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml deleted file mode 100644 index 0b1a6f81df..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-0206, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-0206 -label: - en_US: qwen-plus-0206 -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8000 - min: 1 - max: 8000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml deleted file mode 100644 index 7706005bb5..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-0624, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-0624 -label: - en_US: qwen-plus-0624 -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8000 - min: 1 - max: 8000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml deleted file mode 100644 index 348276fc08..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-0723, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-0723 -label: - en_US: qwen-plus-0723 -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8000 - min: 1 - max: 8000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml deleted file mode 100644 index 29f125135e..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-0806, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-0806 -label: - en_US: qwen-plus-0806 -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
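Each of these deleted files ends with a pricing block (here: input '0.004', output '0.012', unit '0.001', currency RMB). Assuming the usual convention for these fields, where cost = tokens × price × unit so that a unit of '0.001' makes the listed prices per-1K-token rates, the arithmetic looks like this sketch:

```python
from decimal import Decimal

def estimate_cost(prompt_tokens: int, completion_tokens: int,
                  input_price: str, output_price: str, unit: str) -> Decimal:
    # cost = tokens * price * unit; unit '0.001' => prices are per 1K tokens
    u = Decimal(unit)
    return (Decimal(prompt_tokens) * Decimal(input_price) * u
            + Decimal(completion_tokens) * Decimal(output_price) * u)

# pricing from the deleted qwen-plus-0723 YAML above
print(estimate_cost(1000, 500, "0.004", "0.012", "0.001"))  # 0.010000 RMB
```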
- - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
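The repetition_penalty rule above follows the common CTRL-style convention. As a hedged sketch of what such a penalty typically does (the provider-side implementation is not shown here): logits of tokens that already appeared are divided by the penalty when positive and multiplied when negative, so values above 1.0 discourage repetition.

```python
def apply_repetition_penalty(logits: dict[int, float],
                             generated_ids: set[int],
                             penalty: float = 1.1) -> dict[int, float]:
    """Penalize tokens that already appeared in the output (CTRL-style)."""
    out = dict(logits)
    for tok in generated_ids:
        if tok in out:
            out[tok] = out[tok] / penalty if out[tok] > 0 else out[tok] * penalty
    return out

print(apply_repetition_penalty({7: 3.0, 9: -1.0}, {7, 9}, penalty=1.1))
# {7: 2.727..., 9: -1.1} -- both become less likely; penalty 1.0 changes nothing
```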
- - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml deleted file mode 100644 index 905fa1e102..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-0919, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-0919 -label: - en_US: qwen-plus-0919 -model_type: llm -features: - - agent-thought -model_properties: - mode: completion - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.0008' - output: '0.002' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml deleted file mode 100644 index c7a3549727..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# this model corresponds to qwen-plus, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-chat -label: - en_US: qwen-plus-chat -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. 
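The top_p help text repeated through these files describes nucleus sampling: keep the smallest set of most-likely tokens whose probabilities sum to at least top_p, then sample from that set. A small sketch of that rule:

```python
import random

def top_p_filter(probs: dict[str, float], top_p: float = 0.8) -> dict[str, float]:
    """Keep the smallest set of most-likely tokens whose probabilities sum to >= top_p."""
    kept, cum = {}, 0.0
    for tok, p in sorted(probs.items(), key=lambda kv: kv[1], reverse=True):
        kept[tok] = p
        cum += p
        if cum >= top_p:
            break
    total = sum(kept.values())  # renormalize over the candidate set
    return {tok: p / total for tok, p in kept.items()}

probs = {"the": 0.5, "a": 0.3, "an": 0.15, "zebra": 0.05}
candidates = top_p_filter(probs, top_p=0.8)  # keeps "the" and "a" (0.5 + 0.3 >= 0.8)
print(random.choices(list(candidates), weights=list(candidates.values())))
```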
- - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml deleted file mode 100644 index 608f52c296..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-plus-latest, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus-latest -label: - en_US: qwen-plus-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
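The max_tokens rule just above is an upper bound, not a target; a toy sketch of a generation loop makes the distinction concrete:

```python
def generate(next_token, max_tokens: int = 8192, eos: str = "<eos>") -> list[str]:
    """Generation halts at EOS or when max_tokens is reached -- a cap, not a quota."""
    out = []
    while len(out) < max_tokens:
        tok = next_token(out)
        if tok == eos:
            break
        out.append(tok)
    return out

# a toy next-token function that emits EOS early: far fewer than max_tokens come back
print(generate(lambda ctx: "<eos>" if len(ctx) == 3 else "hi", max_tokens=8192))
```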
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.0008' - output: '0.002' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml deleted file mode 100644 index 9089e57255..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# this model corresponds to qwen-plus, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk) -model: qwen-plus -label: - en_US: qwen-plus -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value flattens the peaks of the probability distribution, allowing more low-probability words to be selected, so the output is more diverse; a lower temperature value sharpens the peaks, making high-probability words more likely to be selected, so the output is more deterministic. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: Specifies the maximum number of tokens the model may generate. It defines an upper limit on generation; the model is not guaranteed to produce this many tokens every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold for nucleus sampling during generation. For example, at 0.8 only the smallest set of most-likely tokens whose probabilities sum to at least 0.8 is kept as the candidate set. The value range is (0, 1.0); larger values make the output more random, smaller values more deterministic. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the candidate set sampled from at each generation step. For example, at 50 only the 50 highest-scoring tokens form the candidate set for random sampling. Larger values make the output more random, smaller values more deterministic. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random seed used during generation, which lets the user control the randomness of the model's output. Unsigned 64-bit integers are supported; the default is 1234. With a fixed seed the model tries to produce the same or similar results, but identical output on every run is currently not guaranteed. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Controls the degree of repetition in generated output. Increasing repetition_penalty reduces repetition; 1.0 means no penalty. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in internet search service. This parameter controls whether the model consults internet search results when generating text. When search is enabled, the model uses the results as reference information during generation, but it decides on its own, based on its internal logic, whether to actually use them.
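The top_k rule earlier in this hunk has the usual interpretation of restricting sampling to the k highest-scoring tokens; a brief sketch under that assumption:

```python
import heapq
import random

def top_k_filter(probs: dict[str, float], top_k: int = 50) -> dict[str, float]:
    """Keep only the k highest-scoring tokens as the sampling candidate set."""
    kept = dict(heapq.nlargest(top_k, probs.items(), key=lambda kv: kv[1]))
    total = sum(kept.values())  # renormalize over the survivors
    return {tok: p / total for tok, p in kept.items()}

probs = {"the": 0.5, "a": 0.3, "an": 0.15, "zebra": 0.05}
candidates = top_k_filter(probs, top_k=2)  # only "the" and "a" survive
print(random.choices(list(candidates), weights=list(candidates.values())))
```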
- - name: response_format - use_template: response_format -pricing: - input: '0.0008' - output: '0.002' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml deleted file mode 100644 index 7ee0d44f2f..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml +++ /dev/null @@ -1,77 +0,0 @@ -# this model corresponds to qwen-turbo-0206, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) - -model: qwen-turbo-0206 -label: - en_US: qwen-turbo-0206 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml deleted file mode 100644 index 20a3f7eb64..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-turbo-0624, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) -model: qwen-turbo-0624 -label: - en_US: qwen-turbo-0624 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml deleted file mode 100644 index ba73dec363..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-turbo-0919, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) -model: qwen-turbo-0919 -label: - en_US: qwen-turbo-0919 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.0003' - output: '0.0006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml deleted file mode 100644 index d785b7fe85..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# this model corresponds to qwen-turbo, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) -model: qwen-turbo-chat -label: - en_US: qwen-turbo-chat -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
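The seed rule in these files promises only best-effort reproducibility. A local analogy with Python's random module (the remote service, per the help text, guarantees only "same or similar" results):

```python
import random

def sample_with_seed(candidates: list[str], weights: list[float], seed: int = 1234) -> str:
    rng = random.Random(seed)  # a fixed seed makes this draw repeatable
    return rng.choices(candidates, weights=weights, k=1)[0]

a = sample_with_seed(["x", "y", "z"], [0.2, 0.5, 0.3], seed=1234)
b = sample_with_seed(["x", "y", "z"], [0.2, 0.5, 0.3], seed=1234)
assert a == b  # identical seed, identical result locally -- unlike the remote API,
               # which only promises "same or similar" output for a fixed seed
```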
- - name: max_tokens - use_template: max_tokens - type: int - default: 1500 - min: 1 - max: 1500 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
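All of these deleted files share one parameter_rules shape: name, type, default, min, max, and localized help. A hypothetical resolver (not Dify's actual implementation) shows how a consumer might apply such a rule to a user-supplied value:

```python
from dataclasses import dataclass

@dataclass
class ParameterRule:
    name: str
    type: str                                 # "int" | "float" | "boolean"
    default: float | int | bool | None = None
    min: float | None = None
    max: float | None = None

def resolve(rule: ParameterRule, value=None):
    """Fall back to the rule's default, then enforce the [min, max] range."""
    v = rule.default if value is None else value
    if v is None:
        return None
    if rule.min is not None and v < rule.min:
        raise ValueError(f"{rule.name} must be >= {rule.min}")
    if rule.max is not None and v > rule.max:
        raise ValueError(f"{rule.name} must be <= {rule.max}")
    return v

temp = ParameterRule(name="temperature", type="float", default=0.3, min=0.0, max=2.0)
print(resolve(temp))       # 0.3 (default applied)
print(resolve(temp, 1.5))  # 1.5 (within range)
```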
- - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml deleted file mode 100644 index fe38a4283c..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# this model corresponds to qwen-turbo-latest, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) -model: qwen-turbo-latest -label: - en_US: qwen-turbo-latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.0006' - output: '0.0003' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml deleted file mode 100644 index 215c9ec5fc..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# this model corresponds to qwen-turbo, for more details -# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub) -model: qwen-turbo -label: - en_US: qwen-turbo -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: enable_search - type: boolean - default: false - label: - zh_Hans: 联网搜索 - en_US: Web Search - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - - name: response_format - use_template: response_format -pricing: - input: '0.0006' - output: '0.0003' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml deleted file mode 100644 index d80168ffc3..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-max-0201 -label: - en_US: qwen-vl-max-0201 -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.02' - unit: '0.001' - currency: RMB -deprecated: true diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml deleted file mode 100644 index 50e10226a5..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-max-0809 -label: - en_US: qwen-vl-max-0809 -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. 
The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: max_tokens - required: false - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.02' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml deleted file mode 100644 index 21b127f56c..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-max -label: - en_US: qwen-vl-max -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: max_tokens - required: false - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.02' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml deleted file mode 100644 index 03cb039d15..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-plus-0201 -label: - en_US: qwen-vl-plus-0201 -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: max_tokens - required: false - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
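
The repetition_penalty rules in these files only document the knob. For intuition, here is a minimal sketch of the CTRL-style penalty that many open-source runtimes apply to the logits of already-generated tokens; the function name and the NumPy scaffolding are illustrative assumptions, not code from this PR, and the DashScope backend may implement the penalty differently.

import numpy as np

def apply_repetition_penalty(logits: np.ndarray, generated_ids: list[int], penalty: float) -> np.ndarray:
    # CTRL-style: divide positive logits of already-seen tokens by the penalty
    # and multiply negative ones, so every seen token becomes less likely.
    out = logits.astype(float)
    for tok in set(generated_ids):
        out[tok] = out[tok] / penalty if out[tok] > 0 else out[tok] * penalty
    return out

logits = np.array([3.0, 1.0, -0.5])
print(apply_repetition_penalty(logits, generated_ids=[0, 2], penalty=1.1))
# token 0: 3.0 -> ~2.73, token 2: -0.5 -> -0.55; penalty=1.0 leaves logits unchanged
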
- - name: response_format - use_template: response_format -pricing: - input: '0.02' - output: '0.02' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml deleted file mode 100644 index 67b2b2ebdd..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-plus-0809 -label: - en_US: qwen-vl-plus-0809 -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: max_tokens - required: false - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.008' - output: '0.008' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml deleted file mode 100644 index f55764c6c0..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen-vl-plus -label: - en_US: qwen-vl-plus -model_type: llm -features: - - vision - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. 
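
The top_k rule above bounds how many candidate tokens survive before sampling. A minimal sketch of that filtering step follows, assuming a plain softmax sampler; the helper names are hypothetical, not part of this PR. Note how a fixed seed such as the default 1234 makes the client-side draw reproducible, while, as the help text says, the server offers no such guarantee.

import numpy as np

def top_k_filter(logits: np.ndarray, k: int) -> np.ndarray:
    # Keep only the k highest-scoring logits; everything else gets -inf
    # and therefore probability zero after the softmax.
    if k <= 0:                      # 0 is commonly treated as "top_k disabled"
        return logits
    kth_largest = np.sort(logits)[-k]
    return np.where(logits >= kth_largest, logits, -np.inf)

def sample_token(logits: np.ndarray, rng: np.random.Generator) -> int:
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    return int(rng.choice(len(probs), p=probs))

rng = np.random.default_rng(1234)   # the seed parameter plays this role
logits = np.array([2.0, 1.0, 0.5, -1.0])
print(sample_token(top_k_filter(logits, k=2), rng))  # only token 0 or 1 can be drawn
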
- - name: max_tokens - required: false - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: response_format - use_template: response_format - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.008' - output: '0.008' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml deleted file mode 100644 index ea157f42de..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2-math-1.5b-instruct -label: - en_US: qwen2-math-1.5b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml deleted file mode 100644 index 37052a9233..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2-math-72b-instruct -label: - en_US: qwen2-math-72b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
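
Each of these files closes with a pricing block (input, output, unit, currency). Assuming unit: '0.001' scales the quoted prices to a per-token rate, so that input: '0.004' reads as 0.004 RMB per 1,000 tokens, a request's cost can be estimated as below. The helper and the billing formula are assumptions for illustration, not taken from this PR.

def request_cost_rmb(input_tokens: int, output_tokens: int,
                     input_price: float, output_price: float,
                     unit: float = 0.001) -> float:
    # With unit '0.001' the effective rate is price * unit per token,
    # i.e. the quoted prices read as RMB per 1,000 tokens (assumed).
    return (input_tokens * input_price + output_tokens * output_price) * unit

# e.g. with input '0.004', output '0.012', unit '0.001':
print(request_cost_rmb(1_000, 500, 0.004, 0.012))   # 0.01 RMB
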
- - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml deleted file mode 100644 index e182f1c27f..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2-math-7b-instruct -label: - en_US: qwen2-math-7b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.004' - output: '0.012' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml deleted file mode 100644 index 9e75ccc1f2..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2.5-0.5b-instruct -label: - en_US: qwen2.5-0.5b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.000' - output: '0.000' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml deleted file mode 100644 index 67c9d31243..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2.5-1.5b-instruct -label: - en_US: qwen2.5-1.5b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.000' - output: '0.000' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml deleted file mode 100644 index 2a38be921c..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2.5-14b-instruct -label: - en_US: qwen2.5-14b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
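
The temperature help text above describes flattening or sharpening the token distribution. A short, self-contained sketch of that effect, using the standard softmax-with-temperature formulation rather than code from this repository:

import numpy as np

def softmax_with_temperature(logits, temperature: float) -> np.ndarray:
    # Dividing logits by the temperature flattens (T > 1) or sharpens (T < 1)
    # the distribution, which is the behaviour the help text describes.
    scaled = np.asarray(logits, dtype=float) / max(temperature, 1e-6)
    scaled -= scaled.max()                    # numerical stability
    weights = np.exp(scaled)
    return weights / weights.sum()

for t in (0.3, 1.0, 2.0):
    print(t, np.round(softmax_with_temperature([2.0, 1.0, 0.0], t), 3))
# 0.3 -> ~[0.964 0.034 0.001] (near-deterministic), 2.0 -> ~[0.506 0.307 0.186] (diverse)
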
- - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0.002' - output: '0.006' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml deleted file mode 100644 index e6e4fbf978..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models -model: qwen2.5-32b-instruct -label: - en_US: qwen2.5-32b-instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. 
Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
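
The top_p help in these files describes nucleus sampling: keep the smallest set of most-likely tokens whose probabilities sum to at least the threshold, then renormalize. A minimal sketch of that selection, with the helper name assumed for illustration:

import numpy as np

def top_p_filter(probs: np.ndarray, top_p: float) -> np.ndarray:
    # Keep the smallest set of most-likely tokens whose cumulative
    # probability is >= top_p, then renormalize over the survivors.
    order = np.argsort(probs)[::-1]                 # most likely first
    cutoff = np.searchsorted(np.cumsum(probs[order]), top_p) + 1
    kept = np.zeros_like(probs)
    kept[order[:cutoff]] = probs[order[:cutoff]]
    return kept / kept.sum()

print(top_p_filter(np.array([0.5, 0.3, 0.15, 0.05]), 0.8))
# -> [0.625 0.375 0.    0.   ]  (only the 0.5 and 0.3 tokens survive)
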
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.0035'
-  output: '0.007'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
deleted file mode 100644
index 8f250379a7..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
-model: qwen2.5-3b-instruct
-label:
-  en_US: qwen2.5-3b-instruct
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 32768
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    type: float
-    default: 0.3
-    min: 0.0
-    max: 2.0
-    help:
-      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
-      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
-  - name: max_tokens
-    use_template: max_tokens
-    type: int
-    default: 8192
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
-      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
-  - name: top_p
-    use_template: top_p
-    type: float
-    default: 0.8
-    min: 0.1
-    max: 0.9
-    help:
-      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
-      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
-  - name: top_k
-    type: int
-    min: 0
-    max: 99
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    help:
-      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
-      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
-  - name: seed
-    required: false
-    type: int
-    default: 1234
-    label:
-      zh_Hans: 随机种子
-      en_US: Random seed
-    help:
-      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
-      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
-  - name: repetition_penalty
-    required: false
-    type: float
-    default: 1.1
-    label:
-      zh_Hans: 重复惩罚
-      en_US: Repetition penalty
-    help:
-      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
-      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.000'
-  output: '0.000'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
deleted file mode 100644
index bb3cdd6141..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
-model: qwen2.5-72b-instruct
-label:
-  en_US: qwen2.5-72b-instruct
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 131072
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    type: float
-    default: 0.3
-    min: 0.0
-    max: 2.0
-    help:
-      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
-      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
-  - name: max_tokens
-    use_template: max_tokens
-    type: int
-    default: 8192
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
-      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
-  - name: top_p
-    use_template: top_p
-    type: float
-    default: 0.8
-    min: 0.1
-    max: 0.9
-    help:
-      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
-      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
-  - name: top_k
-    type: int
-    min: 0
-    max: 99
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    help:
-      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
-      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
-  - name: seed
-    required: false
-    type: int
-    default: 1234
-    label:
-      zh_Hans: 随机种子
-      en_US: Random seed
-    help:
-      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
-      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
-  - name: repetition_penalty
-    required: false
-    type: float
-    default: 1.1
-    label:
-      zh_Hans: 重复惩罚
-      en_US: Repetition penalty
-    help:
-      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
-      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.004'
-  output: '0.012'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
deleted file mode 100644
index fdcd3d4275..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
-model: qwen2.5-7b-instruct
-label:
-  en_US: qwen2.5-7b-instruct
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 131072
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    type: float
-    default: 0.3
-    min: 0.0
-    max: 2.0
-    help:
-      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
-      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
-  - name: max_tokens
-    use_template: max_tokens
-    type: int
-    default: 8192
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
-      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
-  - name: top_p
-    use_template: top_p
-    type: float
-    default: 0.8
-    min: 0.1
-    max: 0.9
-    help:
-      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
-      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
-  - name: top_k
-    type: int
-    min: 0
-    max: 99
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    help:
-      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
-      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
-  - name: seed
-    required: false
-    type: int
-    default: 1234
-    label:
-      zh_Hans: 随机种子
-      en_US: Random seed
-    help:
-      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
-      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
-  - name: repetition_penalty
-    required: false
-    type: float
-    default: 1.1
-    label:
-      zh_Hans: 重复惩罚
-      en_US: Repetition penalty
-    help:
-      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
-      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.001'
-  output: '0.002'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
deleted file mode 100644
index 7ebeec3953..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
-model: qwen2.5-coder-7b-instruct
-label:
-  en_US: qwen2.5-coder-7b-instruct
-model_type: llm
-features:
-  - agent-thought
-model_properties:
-  mode: chat
-  context_size: 131072
-parameter_rules:
-  - name: temperature
-    use_template: temperature
-    type: float
-    default: 0.3
-    min: 0.0
-    max: 2.0
-    help:
-      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
-      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
-  - name: max_tokens
-    use_template: max_tokens
-    type: int
-    default: 8192
-    min: 1
-    max: 8192
-    help:
-      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
-      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
-  - name: top_p
-    use_template: top_p
-    type: float
-    default: 0.8
-    min: 0.1
-    max: 0.9
-    help:
-      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
-      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
-  - name: top_k
-    type: int
-    min: 0
-    max: 99
-    label:
-      zh_Hans: 取样数量
-      en_US: Top k
-    help:
-      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
-      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
-  - name: seed
-    required: false
-    type: int
-    default: 1234
-    label:
-      zh_Hans: 随机种子
-      en_US: Random seed
-    help:
-      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
-      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
-  - name: repetition_penalty
-    required: false
-    type: float
-    default: 1.1
-    label:
-      zh_Hans: 重复惩罚
-      en_US: Repetition penalty
-    help:
-      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
-      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
-  - name: response_format
-    use_template: response_format
-pricing:
-  input: '0.001'
-  output: '0.002'
-  unit: '0.001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/__init__.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
deleted file mode 100644
index 52e35d8b50..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
-model: text-embedding-v1
-model_type: text-embedding
-model_properties:
-  context_size: 2048
-  max_chunks: 25
-pricing:
-  input: "0.0007"
-  unit: "0.001"
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
deleted file mode 100644
index 5bb6a8f424..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
-model: text-embedding-v2
-model_type: text-embedding
-model_properties:
-  context_size: 2048
-  max_chunks: 25
-pricing:
-  input: "0.0007"
-  unit: "0.001"
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml
deleted file mode 100644
index d8af0e2b63..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
-model: text-embedding-v3
-model_type: text-embedding
-model_properties:
-  context_size: 8192
-  max_chunks: 25
-pricing:
-  input: "0.0007"
-  unit: "0.001"
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.py b/api/core/model_runtime/model_providers/tongyi/tongyi.py
deleted file mode 100644
index a084512de9..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/tongyi.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import logging
-
-from core.model_runtime.entities.model_entities import ModelType
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class TongyiProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        """
-        Validate provider credentials
-
-        if validate failed, raise exception
-
-        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
-        """
-        try:
-            model_instance = self.get_model_instance(ModelType.LLM)
-
-            # Use `qwen-turbo` model for validate,
-            model_instance.validate_credentials(model="qwen-turbo", credentials=credentials)
-        except CredentialsValidateFailedError as ex:
-            raise ex
-        except Exception as ex:
-            logger.exception(f"{self.get_provider_schema().provider} credentials validate failed")
-            raise ex
diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml
deleted file mode 100644
index 1a09c20fd9..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-provider: tongyi
-label:
-  zh_Hans: 通义千问
-  en_US: TONGYI
-icon_small:
-  en_US: icon_s_en.png
-icon_large:
-  zh_Hans: icon_l_zh.png
-  en_US: icon_l_en.png
-background: "#EFF1FE"
-help:
-  title:
-    en_US: Get your API key from AliCloud
-    zh_Hans: 从阿里云百炼获取 API Key
-  url:
-    en_US: https://bailian.console.aliyun.com/?apiKey=1#/api-key
-supported_model_types:
-  - llm
-  - tts
-  - text-embedding
-configurate_methods:
-  - predefined-model
-  - customizable-model
-provider_credential_schema:
-  credential_form_schemas:
-    - variable: dashscope_api_key
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 API Key
-        en_US: Enter your API Key
-model_credential_schema:
-  model:
-    label:
-      en_US: Model Name
-      zh_Hans: 模型名称
-    placeholder:
-      en_US: Enter your model name
-      zh_Hans: 输入模型名称
-  credential_form_schemas:
-    - variable: dashscope_api_key
-      label:
-        en_US: API Key
-      type: secret-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的 API Key
-        en_US: Enter your API Key
-    - variable: context_size
-      label:
-        zh_Hans: 模型上下文长度
-        en_US: Model context size
-      required: true
-      type: text-input
-      default: '4096'
-      placeholder:
-        zh_Hans: 在此输入您的模型上下文长度
-        en_US: Enter your Model context size
-    - variable: max_tokens
-      label:
-        zh_Hans: 最大 token 上限
-        en_US: Upper bound for max tokens
-      default: '4096'
-      type: text-input
-      show_on:
-        - variable: __model_type
-          value: llm
-    - variable: function_calling_type
-      label:
-        en_US: Function calling
-      type: select
-      required: false
-      default: no_call
-      options:
-        - value: no_call
-          label:
-            en_US: Not Support
-            zh_Hans: 不支持
-        - value: function_call
-          label:
-            en_US: Support
-            zh_Hans: 支持
-      show_on:
-        - variable: __model_type
-          value: llm
diff --git a/api/core/model_runtime/model_providers/tongyi/tts/__init__.py b/api/core/model_runtime/model_providers/tongyi/tts/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/tongyi/tts/tts-1.yaml b/api/core/model_runtime/model_providers/tongyi/tts/tts-1.yaml
deleted file mode 100644
index 4eaa0ff361..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/tts/tts-1.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-model: tts-1
-model_type: tts
-model_properties:
-  default_voice: 'sambert-zhiru-v1'
-  voices:
-    - mode: "sambert-zhinan-v1"
-      name: "知楠(广告男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiqi-v1"
-      name: "知琪(温柔女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhichu-v1"
-      name: "知厨(新闻播报)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhide-v1"
-      name: "知德(新闻男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhijia-v1"
-      name: "知佳(标准女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiru-v1"
-      name: "知茹(新闻女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiqian-v1"
-      name: "知倩(配音解说、新闻播报)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhixiang-v1"
-      name: "知祥(配音解说)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiwei-v1"
-      name: "知薇(萝莉女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhihao-v1"
-      name: "知浩(咨询男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhijing-v1"
-      name: "知婧(严厉女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiming-v1"
-      name: "知茗(诙谐男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhimo-v1"
-      name: "知墨(情感男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhina-v1"
-      name: "知娜(浙普女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhishu-v1"
-      name: "知树(资讯男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhistella-v1"
-      name: "知莎(知性女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiting-v1"
-      name: "知婷(电台女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhixiao-v1"
-      name: "知笑(资讯女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiya-v1"
-      name: "知雅(严厉女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiye-v1"
-      name: "知晔(青年男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiying-v1"
-      name: "知颖(软萌童声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhiyuan-v1"
-      name: "知媛(知心姐姐)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhigui-v1"
-      name: "知柜(直播女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhishuo-v1"
-      name: "知硕(自然男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhimiao-emo-v1"
-      name: "知妙(多种情感女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhimao-v1"
-      name: "知猫(直播女声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhilun-v1"
-      name: "知伦(悬疑解说)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhifei-v1"
-      name: "知飞(激昂解说)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-zhida-v1"
-      name: "知达(标准男声)"
-      language: [ "zh-Hans", "en-US" ]
-    - mode: "sambert-camila-v1"
-      name: "Camila(西班牙语女声)"
-      language: [ "es-ES" ]
-    - mode: "sambert-perla-v1"
-      name: "Perla(意大利语女声)"
-      language: [ "it-IT" ]
-    - mode: "sambert-indah-v1"
-      name: "Indah(印尼语女声)"
-      language: [ "id-ID" ]
-    - mode: "sambert-clara-v1"
-      name: "Clara(法语女声)"
-      language: [ "fr-FR" ]
-    - mode: "sambert-hanna-v1"
-      name: "Hanna(德语女声)"
-      language: [ "de-DE" ]
-    - mode: "sambert-beth-v1"
-      name: "Beth(咨询女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-betty-v1"
-      name: "Betty(客服女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-cally-v1"
-      name: "Cally(自然女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-cindy-v1"
-      name: "Cindy(对话女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-eva-v1"
-      name: "Eva(陪伴女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-donna-v1"
-      name: "Donna(教育女声)"
-      language: [ "en-US" ]
-    - mode: "sambert-brian-v1"
-      name: "Brian(客服男声)"
-      language: [ "en-US" ]
-    - mode: "sambert-waan-v1"
-      name: "Waan(泰语女声)"
-      language: [ "th-TH" ]
-  word_limit: 7000
-  audio_type: 'mp3'
-  max_workers: 5
-pricing:
-  input: '1'
-  output: '0'
-  unit: '0.0001'
-  currency: RMB
diff --git a/api/core/model_runtime/model_providers/tongyi/tts/tts.py b/api/core/model_runtime/model_providers/tongyi/tts/tts.py
deleted file mode 100644
index 48a38897a8..0000000000
--- a/api/core/model_runtime/model_providers/tongyi/tts/tts.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import threading
-from queue import Queue
-from typing import Optional
-
-import dashscope
-from dashscope import SpeechSynthesizer
-from dashscope.api_entities.dashscope_response import SpeechSynthesisResponse
-from dashscope.audio.tts import ResultCallback, SpeechSynthesisResult
-
-from core.model_runtime.errors.invoke import InvokeBadRequestError
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.tts_model import TTSModel
-from core.model_runtime.model_providers.tongyi._common import _CommonTongyi
-
-
-class TongyiText2SpeechModel(_CommonTongyi, TTSModel):
-    """
-    Model class for Tongyi Speech to text model.
-    """
-
-    def _invoke(
-        self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None
-    ) -> any:
-        """
-        _invoke text2speech model
-
-        :param model: model name
-        :param tenant_id: user tenant id
-        :param credentials: model credentials
-        :param voice: model timbre
-        :param content_text: text content to be translated
-        :param user: unique user id
-        :return: text translated to audio file
-        """
-        if not voice or voice not in [
-            d["value"] for d in self.get_tts_model_voices(model=model, credentials=credentials)
-        ]:
-            voice = self._get_model_default_voice(model, credentials)
-
-        return self._tts_invoke_streaming(model=model, credentials=credentials, content_text=content_text, voice=voice)
-
-    def validate_credentials(self, model: str, credentials: dict, user: Optional[str] = None) -> None:
-        """
-        validate credentials text2speech model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param user: unique user id
-        :return: text translated to audio file
-        """
-        try:
-            self._tts_invoke_streaming(
-                model=model,
-                credentials=credentials,
-                content_text="Hello Dify!",
-                voice=self._get_model_default_voice(model, credentials),
-            )
-        except Exception as ex:
-            raise CredentialsValidateFailedError(str(ex))
-
-    def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any:
-        """
-        _tts_invoke_streaming text2speech model
-
-        :param model: model name
-        :param credentials: model credentials
-        :param voice: model timbre
-        :param content_text: text content to be translated
-        :return: text translated to audio file
-        """
-        word_limit = self._get_model_word_limit(model, credentials)
-        audio_type = self._get_model_audio_type(model, credentials)
-        try:
-            audio_queue: Queue = Queue()
-            callback = Callback(queue=audio_queue)
-
-            def invoke_remote(content, v, api_key, cb, at, wl):
-                if len(content) < word_limit:
-                    sentences = [content]
-                else:
-                    sentences = list(self._split_text_into_sentences(org_text=content, max_length=wl))
-                for sentence in sentences:
-                    SpeechSynthesizer.call(
-                        model=v,
-                        sample_rate=16000,
-                        api_key=api_key,
-                        text=sentence.strip(),
-                        callback=cb,
-                        format=at,
-                        word_timestamp_enabled=True,
-                        phoneme_timestamp_enabled=True,
-                    )
-
-            threading.Thread(
-                target=invoke_remote,
-                args=(content_text, voice, credentials.get("dashscope_api_key"), callback, audio_type, word_limit),
-            ).start()
-
-            while True:
-                audio = audio_queue.get()
-                if audio is None:
-                    break
-                yield audio
-
-        except Exception as ex:
-            raise InvokeBadRequestError(str(ex))
-
-    @staticmethod
-    def _process_sentence(sentence: str, credentials: dict, voice: str, audio_type: str):
-        """
-        _tts_invoke Tongyi text2speech model api
-
-        :param credentials: model credentials
-        :param sentence: text content to be translated
-        :param voice: model timbre
-        :param audio_type: audio file type
-        :return: text translated to audio file
-        """
-        response = dashscope.audio.tts.SpeechSynthesizer.call(
-            model=voice,
-            sample_rate=48000,
-            api_key=credentials.get("dashscope_api_key"),
-            text=sentence.strip(),
-            format=audio_type,
-        )
-        if isinstance(response.get_audio_data(), bytes):
-            return response.get_audio_data()
-
-
-class Callback(ResultCallback):
-    def __init__(self, queue: Queue):
-        self._queue = queue
-
-    def on_open(self):
-        pass
-
-    def on_complete(self):
-        self._queue.put(None)
-        self._queue.task_done()
-
-    def on_error(self, response: SpeechSynthesisResponse):
-        self._queue.put(None)
-        self._queue.task_done()
-
-    def on_close(self):
-        self._queue.put(None)
-        self._queue.task_done()
-
-    def on_event(self, result: SpeechSynthesisResult):
-        ad = result.get_audio_frame()
-        if ad:
-            self._queue.put(ad)
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/__init__.py b/api/core/model_runtime/model_providers/triton_inference_server/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_l_en.png
deleted file mode 100644
index dd32d45803..0000000000
Binary files a/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_l_en.png and /dev/null differ
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_s_en.svg
deleted file mode 100644
index 9fc02f9164..0000000000
--- a/api/core/model_runtime/model_providers/triton_inference_server/_assets/icon_s_en.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/llm/__init__.py b/api/core/model_runtime/model_providers/triton_inference_server/llm/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py b/api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py
deleted file mode 100644
index cf7e3f14be..0000000000
--- a/api/core/model_runtime/model_providers/triton_inference_server/llm/llm.py
+++ /dev/null
@@ -1,280 +0,0 @@
-from collections.abc import Generator
-
-from httpx import Response, post
-from yarl import URL
-
-from core.model_runtime.entities.common_entities import I18nObject
-from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    PromptMessage,
-    PromptMessageTool,
-    SystemPromptMessage,
-    UserPromptMessage,
-)
-from core.model_runtime.entities.model_entities import (
-    AIModelEntity,
-    FetchFrom,
-    ModelPropertyKey,
-    ModelType,
-    ParameterRule,
-    ParameterType,
-)
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeBadRequestError,
-    InvokeConnectionError,
-    InvokeError,
-    InvokeRateLimitError,
-    InvokeServerUnavailableError,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
-
-
-class TritonInferenceAILargeLanguageModel(LargeLanguageModel):
-    def _invoke(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        model_parameters: dict,
-        tools: list[PromptMessageTool] | None = None,
-        stop: list[str] | None = None,
-        stream: bool = True,
-        user: str | None = None,
-    ) -> LLMResult | Generator:
-        """
-        invoke LLM
-
-        see `core.model_runtime.model_providers.__base.large_language_model.LargeLanguageModel._invoke`
-        """
-        return self._generate(
-            model=model,
-            credentials=credentials,
-            prompt_messages=prompt_messages,
-            model_parameters=model_parameters,
-            tools=tools,
-            stop=stop,
-            stream=stream,
-            user=user,
-        )
-
-    def validate_credentials(self, model: str, credentials: dict) -> None:
-        """
-        validate credentials
-        """
-        if "server_url" not in credentials:
-            raise CredentialsValidateFailedError("server_url is required in credentials")
-
-        try:
-            self._invoke(
-                model=model,
-                credentials=credentials,
-                prompt_messages=[UserPromptMessage(content="ping")],
-                model_parameters={},
-                stream=False,
-            )
-        except InvokeError as ex:
-            raise CredentialsValidateFailedError(f"An error occurred during connection: {str(ex)}")
-
-    def get_num_tokens(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        tools: list[PromptMessageTool] | None = None,
-    ) -> int:
-        """
-        get number of tokens
-
-        cause TritonInference LLM is a customized model, we could net detect which tokenizer to use
-        so we just take the GPT2 tokenizer as default
-        """
-        return self._get_num_tokens_by_gpt2(self._convert_prompt_message_to_text(prompt_messages))
-
-    def _convert_prompt_message_to_text(self, message: list[PromptMessage]) -> str:
-        """
-        convert prompt message to text
-        """
-        text = ""
-        for item in message:
-            if isinstance(item, UserPromptMessage):
-                text += f"User: {item.content}"
-            elif isinstance(item, SystemPromptMessage):
-                text += f"System: {item.content}"
-            elif isinstance(item, AssistantPromptMessage):
-                text += f"Assistant: {item.content}"
-            else:
-                raise NotImplementedError(f"PromptMessage type {type(item)} is not supported")
-        return text
-
-    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None:
-        """
-        used to define customizable model schema
-        """
-        rules = [
-            ParameterRule(
-                name="temperature",
-                type=ParameterType.FLOAT,
-                use_template="temperature",
-                label=I18nObject(zh_Hans="温度", en_US="Temperature"),
-            ),
-            ParameterRule(
-                name="top_p",
-                type=ParameterType.FLOAT,
-                use_template="top_p",
-                label=I18nObject(zh_Hans="Top P", en_US="Top P"),
-            ),
-            ParameterRule(
-                name="max_tokens",
-                type=ParameterType.INT,
-                use_template="max_tokens",
-                min=1,
-                max=int(credentials.get("context_length", 2048)),
-                default=min(512, int(credentials.get("context_length", 2048))),
-                label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"),
-            ),
-        ]
-
-        completion_type = None
-
-        if "completion_type" in credentials:
-            if credentials["completion_type"] == "chat":
-                completion_type = LLMMode.CHAT.value
-            elif credentials["completion_type"] == "completion":
-                completion_type = LLMMode.COMPLETION.value
-            else:
-                raise ValueError(f'completion_type {credentials["completion_type"]} is not supported')
-
-        entity = AIModelEntity(
-            model=model,
-            label=I18nObject(en_US=model),
-            parameter_rules=rules,
-            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
-            model_type=ModelType.LLM,
-            model_properties={
-                ModelPropertyKey.MODE: completion_type,
-                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_length", 2048)),
-            },
-        )
-
-        return entity
-
-    def _generate(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        model_parameters: dict,
-        tools: list[PromptMessageTool] | None = None,
-        stop: list[str] | None = None,
-        stream: bool = True,
-        user: str | None = None,
-    ) -> LLMResult | Generator:
-        """
-        generate text from LLM
-        """
-        if "server_url" not in credentials:
-            raise CredentialsValidateFailedError("server_url is required in credentials")
-
-        if "stream" in credentials and not bool(credentials["stream"]) and stream:
-            raise ValueError(f"stream is not supported by model {model}")
-
-        try:
-            parameters = {}
-            if "temperature" in model_parameters:
-                parameters["temperature"] = model_parameters["temperature"]
-            if "top_p" in model_parameters:
-                parameters["top_p"] = model_parameters["top_p"]
-            if "top_k" in model_parameters:
-                parameters["top_k"] = model_parameters["top_k"]
-            if "presence_penalty" in model_parameters:
-                parameters["presence_penalty"] = model_parameters["presence_penalty"]
-            if "frequency_penalty" in model_parameters:
-                parameters["frequency_penalty"] = model_parameters["frequency_penalty"]
-
-            response = post(
-                str(URL(credentials["server_url"]) / "v2" / "models" / model / "generate"),
-                json={
-                    "text_input": self._convert_prompt_message_to_text(prompt_messages),
-                    "max_tokens": model_parameters.get("max_tokens", 512),
-                    "parameters": {"stream": False, **parameters},
-                },
-                timeout=(10, 120),
-            )
-            response.raise_for_status()
-            if response.status_code != 200:
-                raise InvokeBadRequestError(f"Invoke failed with status code {response.status_code}, {response.text}")
-
-            if stream:
-                return self._handle_chat_stream_response(
-                    model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=response
-                )
-            return self._handle_chat_generate_response(
-                model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=response
-            )
-        except Exception as ex:
-            raise InvokeConnectionError(f"An error occurred during connection: {str(ex)}")
-
-    def _handle_chat_generate_response(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        tools: list[PromptMessageTool],
-        resp: Response,
-    ) -> LLMResult:
-        """
-        handle normal chat generate response
-        """
-        text = resp.json()["text_output"]
-
-        usage = LLMUsage.empty_usage()
-        usage.prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-        usage.completion_tokens = self._get_num_tokens_by_gpt2(text)
-
-        return LLMResult(
-            model=model, prompt_messages=prompt_messages, message=AssistantPromptMessage(content=text), usage=usage
-        )
-
-    def _handle_chat_stream_response(
-        self,
-        model: str,
-        credentials: dict,
-        prompt_messages: list[PromptMessage],
-        tools: list[PromptMessageTool],
-        resp: Response,
-    ) -> Generator:
-        """
-        handle normal chat generate response
-        """
-        text = resp.json()["text_output"]
-
-        usage = LLMUsage.empty_usage()
-        usage.prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
-        usage.completion_tokens = self._get_num_tokens_by_gpt2(text)
-
-        yield LLMResultChunk(
-            model=model,
-            prompt_messages=prompt_messages,
-            delta=LLMResultChunkDelta(index=0, message=AssistantPromptMessage(content=text), usage=usage),
-        )
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [],
-            InvokeServerUnavailableError: [],
-            InvokeRateLimitError: [],
-            InvokeAuthorizationError: [],
-            InvokeBadRequestError: [ValueError],
-        }
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.py b/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.py
deleted file mode 100644
index d85f7c82e7..0000000000
--- a/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import logging
-
-from core.model_runtime.model_providers.__base.model_provider import ModelProvider
-
-logger = logging.getLogger(__name__)
-
-
-class XinferenceAIProvider(ModelProvider):
-    def validate_provider_credentials(self, credentials: dict) -> None:
-        pass
diff --git a/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.yaml b/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.yaml
deleted file mode 100644
index 218678b883..0000000000
--- a/api/core/model_runtime/model_providers/triton_inference_server/triton_inference_server.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-provider: triton_inference_server
-label:
-  en_US: Triton Inference Server
-icon_small:
-  en_US: icon_s_en.svg
-icon_large:
-  en_US: icon_l_en.png
-background: "#EFFDFD"
-help:
-  title:
-    en_US: How to deploy Triton Inference Server
-    zh_Hans: 如何部署 Triton Inference Server
-  url:
-    en_US: https://github.com/triton-inference-server/server
-supported_model_types:
-  - llm
-configurate_methods:
-  - customizable-model
-model_credential_schema:
-  model:
-    label:
-      en_US: Model Name
-      zh_Hans: 模型名称
-    placeholder:
-      en_US: Enter your model name
-      zh_Hans: 输入模型名称
-  credential_form_schemas:
-    - variable: server_url
-      label:
-        zh_Hans: 服务器URL
-        en_US: Server url
-      type: text-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入 Triton Inference Server 的服务器地址,如 http://192.168.1.100:8000
-        en_US: Enter the url of your Triton Inference Server, e.g. http://192.168.1.100:8000
-    - variable: context_size
-      label:
-        zh_Hans: 上下文大小
-        en_US: Context size
-      type: text-input
-      required: true
-      placeholder:
-        zh_Hans: 在此输入您的上下文大小
-        en_US: Enter the context size
-      default: '2048'
-    - variable: completion_type
-      label:
-        zh_Hans: 补全类型
-        en_US: Model type
-      type: select
-      required: true
-      default: chat
-      placeholder:
-        zh_Hans: 在此输入您的补全类型
-        en_US: Enter the completion type
-      options:
-        - label:
-            zh_Hans: 补全模型
-            en_US: Completion model
-          value: completion
-        - label:
-            zh_Hans: 对话模型
-            en_US: Chat model
-          value: chat
-    - variable: stream
-      label:
-        zh_Hans: 流式输出
-        en_US: Stream output
-      type: select
-      required: true
-      default: 'true'
-      placeholder:
-        zh_Hans: 是否支持流式输出
-        en_US: Whether to support stream output
-      options:
-        - label:
-            zh_Hans: 是
-            en_US: 'Yes'
-          value: 'true'
-        - label:
-            zh_Hans: 否
-            en_US: 'No'
-          value: 'false'
diff --git a/api/core/model_runtime/model_providers/upstage/__init__.py b/api/core/model_runtime/model_providers/upstage/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg
deleted file mode 100644
index 0761f85ba6..0000000000
--- a/api/core/model_runtime/model_providers/upstage/_assets/icon_l_en.svg
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg
deleted file mode 100644
index 44ef12b730..0000000000
--- a/api/core/model_runtime/model_providers/upstage/_assets/icon_s_en.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/api/core/model_runtime/model_providers/upstage/_common.py b/api/core/model_runtime/model_providers/upstage/_common.py
deleted file mode 100644
index 47ebaccd84..0000000000
--- a/api/core/model_runtime/model_providers/upstage/_common.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from collections.abc import Mapping
-
-import openai
-from httpx import Timeout
-
-from core.model_runtime.errors.invoke import (
-    InvokeAuthorizationError,
-    InvokeBadRequestError,
-    InvokeConnectionError,
-    InvokeError,
-    InvokeRateLimitError,
-    InvokeServerUnavailableError,
-)
-
-
-class _CommonUpstage:
-    def _to_credential_kwargs(self, credentials: Mapping) -> dict:
-        """
-        Transform credentials to kwargs for model instance
-
-        :param credentials:
-        :return:
-        """
-        credentials_kwargs = {
-            "api_key": credentials["upstage_api_key"],
-            "base_url": "https://api.upstage.ai/v1/solar",
-            "timeout": Timeout(315.0, read=300.0, write=20.0, connect=10.0),
-            "max_retries": 1,
-        }
-
-        return credentials_kwargs
-
-    @property
-    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
-        """
-        Map model invoke error to unified error
-        The key is the error type thrown to the caller
-        The value is the error type thrown by the model,
-        which needs to be converted into a unified error type for the caller.
-
-        :return: Invoke error mapping
-        """
-        return {
-            InvokeConnectionError: [openai.APIConnectionError, openai.APITimeoutError],
-            InvokeServerUnavailableError: [openai.InternalServerError],
-            InvokeRateLimitError: [openai.RateLimitError],
-            InvokeAuthorizationError: [openai.AuthenticationError, openai.PermissionDeniedError],
-            InvokeBadRequestError: [
-                openai.BadRequestError,
-                openai.NotFoundError,
-                openai.UnprocessableEntityError,
-                openai.APIError,
-            ],
-        }
diff --git a/api/core/model_runtime/model_providers/upstage/llm/__init__.py b/api/core/model_runtime/model_providers/upstage/llm/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/core/model_runtime/model_providers/upstage/llm/_position.yaml b/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
deleted file mode 100644
index 7992843dcb..0000000000
--- a/api/core/model_runtime/model_providers/upstage/llm/_position.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- solar-1-mini-chat
diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py
deleted file mode 100644
index a18ee90624..0000000000
--- a/api/core/model_runtime/model_providers/upstage/llm/llm.py
+++ /dev/null
@@ -1,603 +0,0 @@
-import logging
-from collections.abc import Generator
-from typing import Optional, Union, cast
-
-from openai import OpenAI, Stream
-from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall
-from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall
-from openai.types.chat.chat_completion_message import FunctionCall
-from tokenizers import Tokenizer
-
-from core.model_runtime.callbacks.base_callback import Callback
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    ImagePromptMessageContent,
-    PromptMessage,
-    PromptMessageContentType,
-    PromptMessageTool,
-    SystemPromptMessage,
-    TextPromptMessageContent,
-    ToolPromptMessage,
-    UserPromptMessage,
-)
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
-from core.model_runtime.model_providers.upstage._common import _CommonUpstage
-
-logger = logging.getLogger(__name__)
-
-UPSTAGE_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object.
-The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure
-if you are not sure about the structure.
-
-
-{{instructions}}
-
-"""  # noqa: E501
-
-
-class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel):
-    """
-    Model class for Upstage large language model.
- """ - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - - return self._chat_generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: Optional[list[Callback]] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - if "response_format" in model_parameters and model_parameters["response_format"] in {"JSON", "XML"}: - stop = stop or [] - self._transform_chat_json_prompts( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - response_format=model_parameters["response_format"], - ) - model_parameters.pop("response_format") - - return self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _transform_chat_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts - """ - if stop is None: - stop = [] - if "```\n" not in stop: - stop.append("```\n") - if "\n```" not in stop: - stop.append("\n```") - - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - prompt_messages[0] = SystemPromptMessage( - content=UPSTAGE_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}\n")) - else: - prompt_messages.insert( - 0, - SystemPromptMessage( - content=UPSTAGE_BLOCK_MODE_PROMPT.replace( - "{{instructions}}", f"Please output a valid {response_format} object." 
- ).replace("{{block}}", response_format) - ), - ) - prompt_messages.append(AssistantPromptMessage(content=f"\n```{response_format}")) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - return self._num_tokens_from_messages(model, prompt_messages, tools) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - client.chat.completions.create( - messages=[{"role": "user", "content": "ping"}], model=model, temperature=0, max_tokens=10, stream=False - ) - except Exception as e: - raise CredentialsValidateFailedError(str(e)) - - def _chat_generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - - if tools: - extra_model_kwargs["functions"] = [ - {"name": tool.name, "description": tool.description, "parameters": tool.parameters} for tool in tools - ] - - if stop: - extra_model_kwargs["stop"] = stop - - if user: - extra_model_kwargs["user"] = user - - # chat model - response = client.chat.completions.create( - messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages], - model=model, - stream=stream, - **model_parameters, - **extra_model_kwargs, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools) - return self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools) - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - response: ChatCompletion, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response - """ - assistant_message = response.choices[0].message - # assistant_message_tool_calls = assistant_message.tool_calls - assistant_message_function_call = assistant_message.function_call - - # extract tool calls from response - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message.content, tool_calls=tool_calls) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.prompt_tokens - completion_tokens = response.usage.completion_tokens - else: - # calculate num tokens - prompt_tokens = 
self._num_tokens_from_messages(model, prompt_messages, tools) - completion_tokens = self._num_tokens_from_messages(model, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=response.model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - system_fingerprint=response.system_fingerprint, - ) - - return response - - def _handle_chat_generate_stream_response( - self, - model: str, - credentials: dict, - response: Stream[ChatCompletionChunk], - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: llm response chunk generator - """ - full_assistant_content = "" - delta_assistant_message_function_call_storage: Optional[ChoiceDeltaFunctionCall] = None - prompt_tokens = 0 - completion_tokens = 0 - final_tool_calls = [] - final_chunk = LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=""), - ), - ) - - for chunk in response: - if len(chunk.choices) == 0: - if chunk.usage: - # calculate num tokens - prompt_tokens = chunk.usage.prompt_tokens - completion_tokens = chunk.usage.completion_tokens - continue - - delta = chunk.choices[0] - has_finish_reason = delta.finish_reason is not None - - if ( - not has_finish_reason - and (delta.delta.content is None or delta.delta.content == "") - and delta.delta.function_call is None - ): - continue - - # assistant_message_tool_calls = delta.delta.tool_calls - assistant_message_function_call = delta.delta.function_call - - # extract tool calls from response - if delta_assistant_message_function_call_storage is not None: - # handle process of stream function call - if assistant_message_function_call: - # message has not ended ever - delta_assistant_message_function_call_storage.arguments += assistant_message_function_call.arguments - continue - else: - # message has ended - assistant_message_function_call = delta_assistant_message_function_call_storage - delta_assistant_message_function_call_storage = None - else: - if assistant_message_function_call: - # start of stream function call - delta_assistant_message_function_call_storage = assistant_message_function_call - if delta_assistant_message_function_call_storage.arguments is None: - delta_assistant_message_function_call_storage.arguments = "" - if not has_finish_reason: - continue - - # tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls) - function_call = self._extract_response_function_call(assistant_message_function_call) - tool_calls = [function_call] if function_call else [] - if tool_calls: - final_tool_calls.extend(tool_calls) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.delta.content or "", tool_calls=tool_calls) - - full_assistant_content += delta.delta.content or "" - - if has_finish_reason: - final_chunk = LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - ), - ) - else: - yield LLMResultChunk( - 
model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - ), - ) - - if not prompt_tokens: - prompt_tokens = self._num_tokens_from_messages(model, prompt_messages, tools) - - if not completion_tokens: - full_assistant_prompt_message = AssistantPromptMessage( - content=full_assistant_content, tool_calls=final_tool_calls - ) - completion_tokens = self._num_tokens_from_messages(model, [full_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - final_chunk.delta.usage = usage - - yield final_chunk - - def _extract_response_tool_calls( - self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - - return tool_calls - - def _extract_response_function_call( - self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall - ) -> AssistantPromptMessage.ToolCall: - """ - Extract function call from response - - :param response_function_call: response function call - :return: tool call - """ - tool_call = None - if response_function_call: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function_call.name, arguments=response_function_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_function_call.name, type="function", function=function - ) - - return tool_call - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for Upstage API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls: - # message_dict["tool_calls"] = [tool_call.dict() for tool_call in - # message.tool_calls] - function_call = message.tool_calls[0] - message_dict["function_call"] = { - "name": function_call.function.name, - "arguments": function_call.function.arguments, - } - elif isinstance(message, 
SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - # message_dict = { - # "role": "tool", - # "content": message.content, - # "tool_call_id": message.tool_call_id - # } - message_dict = {"role": "function", "content": message.content, "name": message.tool_call_id} - else: - raise ValueError(f"Got unknown type {message}") - - if message.name: - message_dict["name"] = message.name - - return message_dict - - def _get_tokenizer(self) -> Tokenizer: - return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer") - - def _num_tokens_from_messages( - self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """ - Calculate num tokens for solar with Huggingface Solar tokenizer. - Solar tokenizer is opened in huggingface https://huggingface.co/upstage/solar-1-mini-tokenizer - """ - tokenizer = self._get_tokenizer() - tokens_per_message = 5 # <|im_start|>{role}\n{message}<|im_end|> - tokens_prefix = 1 # <|startoftext|> - tokens_suffix = 3 # <|im_start|>assistant\n - - num_tokens = 0 - num_tokens += tokens_prefix - - messages_dict = [self._convert_prompt_message_to_dict(message) for message in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += len(tokenizer.encode(t_key, add_special_tokens=False)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(tokenizer.encode(f_key, add_special_tokens=False)) - num_tokens += len(tokenizer.encode(f_value, add_special_tokens=False)) - else: - num_tokens += len(tokenizer.encode(t_key, add_special_tokens=False)) - num_tokens += len(tokenizer.encode(t_value, add_special_tokens=False)) - else: - num_tokens += len(tokenizer.encode(str(value), add_special_tokens=False)) - num_tokens += tokens_suffix - - if tools: - num_tokens += self._num_tokens_for_tools(tokenizer, tools) - - return num_tokens - - def _num_tokens_for_tools(self, tokenizer: Tokenizer, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling with upstage tokenizer. 
- - :param tokenizer: huggingface tokenizer - :param tools: tools for tool calling - :return: number of tokens - """ - num_tokens = 0 - for tool in tools: - num_tokens += len(tokenizer.encode("type")) - num_tokens += len(tokenizer.encode("function")) - - # calculate num tokens for function object - num_tokens += len(tokenizer.encode("name")) - num_tokens += len(tokenizer.encode(tool.name)) - num_tokens += len(tokenizer.encode("description")) - num_tokens += len(tokenizer.encode(tool.description)) - parameters = tool.parameters - num_tokens += len(tokenizer.encode("parameters")) - if "title" in parameters: - num_tokens += len(tokenizer.encode("title")) - num_tokens += len(tokenizer.encode(parameters.get("title"))) - num_tokens += len(tokenizer.encode("type")) - num_tokens += len(tokenizer.encode(parameters.get("type"))) - if "properties" in parameters: - num_tokens += len(tokenizer.encode("properties")) - for key, value in parameters.get("properties").items(): - num_tokens += len(tokenizer.encode(key)) - for field_key, field_value in value.items(): - num_tokens += len(tokenizer.encode(field_key)) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += len(tokenizer.encode(enum_field)) - else: - num_tokens += len(tokenizer.encode(field_key)) - num_tokens += len(tokenizer.encode(str(field_value))) - if "required" in parameters: - num_tokens += len(tokenizer.encode("required")) - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += len(tokenizer.encode(required_field)) - - return num_tokens diff --git a/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml b/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml deleted file mode 100644 index 787ac83f8a..0000000000 --- a/api/core/model_runtime/model_providers/upstage/llm/solar-1-mini-chat.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: solar-1-mini-chat -label: - zh_Hans: solar-1-mini-chat - en_US: solar-1-mini-chat - ko_KR: solar-1-mini-chat -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 32768 - - name: seed - label: - zh_Hans: 种子 - en_US: Seed - type: int - help: - zh_Hans: - 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint - 响应参数来监视变化。 - en_US: - If specified, model will make a best effort to sample deterministically, - such that repeated requests with the same seed and parameters should return - the same result. Determinism is not guaranteed, and you should refer to the - system_fingerprint response parameter to monitor changes in the backend. 
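Since the deleted Upstage integration speaks the OpenAI chat-completions dialect, the `seed` rule above can be exercised directly with the `openai` client. A hedged sketch follows; the base URL is an assumption for illustration and is not stated in this diff.

    # Hedged sketch: probing best-effort determinism via `seed`.
    from openai import OpenAI

    client = OpenAI(
        api_key="<UPSTAGE_API_KEY>",
        base_url="https://api.upstage.ai/v1/solar",  # assumed OpenAI-compatible endpoint
    )
    resp = client.chat.completions.create(
        model="solar-1-mini-chat",
        messages=[{"role": "user", "content": "ping"}],
        seed=42,        # repeated calls should match, but determinism is not guaranteed
        max_tokens=16,
    )
    # As the help text above suggests, watch this value to detect backend
    # changes that can break determinism across otherwise identical requests.
    print(resp.system_fingerprint)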
- required: false -pricing: - input: "0.5" - output: "0.5" - unit: "0.000001" - currency: USD diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/__init__.py b/api/core/model_runtime/model_providers/upstage/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml deleted file mode 100644 index d838a5bbb1..0000000000 --- a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-passage.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: solar-embedding-1-large-passage -model_type: text-embedding -model_properties: - context_size: 4000 - max_chunks: 32 -pricing: - input: '0.1' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml b/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml deleted file mode 100644 index c77645cffd..0000000000 --- a/api/core/model_runtime/model_providers/upstage/text_embedding/solar-embedding-1-large-query.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: solar-embedding-1-large-query -model_type: text-embedding -model_properties: - context_size: 4000 - max_chunks: 32 -pricing: - input: '0.1' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/upstage/upstage.py b/api/core/model_runtime/model_providers/upstage/upstage.py deleted file mode 100644 index e45d4aae19..0000000000 --- a/api/core/model_runtime/model_providers/upstage/upstage.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class UpstageProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials; - if validation fails, raise an exception. - - :param credentials: provider credentials, credentials from the form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="solar-1-mini-chat", credentials=credentials) - except CredentialsValidateFailedError as e: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise e - except Exception as e: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise e diff --git a/api/core/model_runtime/model_providers/upstage/upstage.yaml b/api/core/model_runtime/model_providers/upstage/upstage.yaml deleted file mode 100644 index 837667cfa9..0000000000 --- a/api/core/model_runtime/model_providers/upstage/upstage.yaml +++ /dev/null @@ -1,49 +0,0 @@ -provider: upstage -label: - en_US: Upstage -description: - en_US: Models provided by Upstage, such as Solar-1-mini-chat. - zh_Hans: Upstage 提供的模型,例如 Solar-1-mini-chat. 
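The UpstageProvider above validates provider-level credentials by probing a single cheap LLM call. Below is a minimal sketch of that pattern with one deliberate deviation, flagged in the comments: the deleted code re-raised the original exception, while this variant normalizes unknown failures to the validation error type.

    # Illustrative sketch, not the shipped implementation; imports mirror
    # the deleted module, `validate_by_probe` is a hypothetical helper.
    import logging

    from core.model_runtime.entities.model_entities import ModelType
    from core.model_runtime.errors.validate import CredentialsValidateFailedError

    logger = logging.getLogger(__name__)

    def validate_by_probe(provider, credentials: dict) -> None:
        llm = provider.get_model_instance(ModelType.LLM)
        try:
            # Cheapest meaningful probe: validate against one known model.
            llm.validate_credentials(model="solar-1-mini-chat", credentials=credentials)
        except CredentialsValidateFailedError:
            raise  # already the error type the caller expects
        except Exception as e:
            logger.exception("credentials validate failed")
            # Stricter than the deleted code above: wrap unknown failures
            # instead of re-raising them unchanged.
            raise CredentialsValidateFailedError(str(e)) from e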
-icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FFFFFF" -help: - title: - en_US: Get your API Key from Upstage - zh_Hans: 从 Upstage 获取 API Key - url: - en_US: https://console.upstage.ai/api-keys -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: upstage_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key -provider_credential_schema: - credential_form_schemas: - - variable: upstage_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/vertex_ai/__init__.py b/api/core/model_runtime/model_providers/vertex_ai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_l_en.png deleted file mode 100644 index 9f8f05231a..0000000000 Binary files a/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_s_en.svg deleted file mode 100644 index efc3589c07..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/_assets/icon_s_en.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/vertex_ai/_common.py b/api/core/model_runtime/model_providers/vertex_ai/_common.py deleted file mode 100644 index 8f7c859e38..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/_common.py +++ /dev/null @@ -1,15 +0,0 @@ -from core.model_runtime.errors.invoke import InvokeError - - -class _CommonVertexAi: - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - pass diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/__init__.py b/api/core/model_runtime/model_providers/vertex_ai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-haiku.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-haiku.yaml deleted file mode 100644 index 5613348695..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-haiku.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: claude-3-haiku@20240307 -label: - en_US: Claude 3 Haiku -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. 
Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.00025' - output: '0.00125' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-opus.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-opus.yaml deleted file mode 100644 index ab084636b5..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-opus.yaml +++ /dev/null @@ -1,56 +0,0 @@ -model: claude-3-opus@20240229 -label: - en_US: Claude 3 Opus -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - # docs: https://docs.anthropic.com/claude/docs/system-prompts - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
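Each of these deleted model cards ends with a pricing block; the one for Claude 3 Opus follows immediately below. In this schema the price is quoted per `unit` of a token, so the usual reading is cost = tokens x price x unit. Treat that formula as an inference from the schema, not something the diff itself states.

    # Hedged sketch of the assumed pricing semantics: unit '0.001' with
    # input '0.015' means $0.015 per 1,000 input tokens.
    from decimal import Decimal

    def usage_cost(tokens: int, price: str, unit: str) -> Decimal:
        return Decimal(tokens) * Decimal(price) * Decimal(unit)

    # Using the Claude 3 Opus card below: 1M input plus 100K output tokens.
    total = usage_cost(1_000_000, "0.015", "0.001") + usage_cost(100_000, "0.075", "0.001")
    assert total == Decimal("22.5")  # 15.0 input + 7.5 output, in USD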
-pricing: - input: '0.015' - output: '0.075' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-sonnet.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-sonnet.yaml deleted file mode 100644 index 0be0113ffd..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3-sonnet.yaml +++ /dev/null @@ -1,55 +0,0 @@ -model: claude-3-sonnet@20240229 -label: - en_US: Claude 3 Sonnet -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. - - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet.yaml deleted file mode 100644 index c64384e6a2..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/anthropic.claude-3.5-sonnet.yaml +++ /dev/null @@ -1,55 +0,0 @@ -model: claude-3-5-sonnet@20240620 -label: - en_US: Claude 3.5 Sonnet -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: max_tokens - use_template: max_tokens - required: true - type: int - default: 4096 - min: 1 - max: 4096 - help: - zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 - en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. - - name: temperature - use_template: temperature - required: false - type: float - default: 1 - min: 0.0 - max: 1.0 - help: - zh_Hans: 生成内容的随机性。 - en_US: The amount of randomness injected into the response. 
- - name: top_p - required: false - type: float - default: 0.999 - min: 0.000 - max: 1.000 - help: - zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 - en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. - - name: top_k - required: false - type: int - default: 0 - min: 0 - # tip docs from aws has error, max value is 500 - max: 500 - help: - zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 - en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. -pricing: - input: '0.003' - output: '0.015' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro-vision.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro-vision.yaml deleted file mode 100644 index ebb276b8af..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro-vision.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-1.0-pro-vision-001 -label: - en_US: Gemini 1.0 Pro Vision -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 2048 - min: 1 - max: 2048 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro.yaml deleted file mode 100644 index c325973846..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.0-pro.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: gemini-1.0-pro-002 -label: - en_US: Gemini 1.0 Pro -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32760 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/__init__.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-embedding-004.yaml b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-embedding-004.yaml deleted file mode 100644 index 32db6faf89..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-embedding-004.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: text-embedding-004 -model_type: text-embedding -model_properties: - context_size: 2048 -pricing: - input: '0.00013' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-multilingual-embedding-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-multilingual-embedding-002.yaml deleted file mode 100644 index 2ec0eea9f2..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text-multilingual-embedding-002.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: text-multilingual-embedding-002 -model_type: text-embedding -model_properties: - context_size: 2048 -pricing: - input: '0.00013' - unit: '0.001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.py b/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.py deleted file mode 100644 index 466a86fd36..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class VertexAiProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials; - - if validation fails, raise an exception. - - :param credentials: provider credentials, credentials from the form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use the `gemini-1.0-pro-002` model for validation. - model_instance.validate_credentials(model="gemini-1.0-pro-002", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.yaml b/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.yaml deleted file mode 100644 index 27a4d03fe2..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/vertex_ai.yaml +++ /dev/null @@ -1,43 +0,0 @@ -provider: vertex_ai -label: - en_US: Vertex AI | Google Cloud Platform -description: - en_US: Vertex AI in Google Cloud Platform. 
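The credential schema further below lets `vertex_service_account_key` be left blank so that Application Default Credentials are used instead of an explicit base64-encoded key. A sketch of how such a dual path is typically resolved with google-auth; the helper function and its wiring are assumptions, only the library calls are real:

    import base64
    import json

    import google.auth
    from google.oauth2 import service_account

    def resolve_gcp_credentials(encoded_key: str | None):
        """Return explicit credentials from a base64 JSON key, else fall back to ADC."""
        if encoded_key:
            info = json.loads(base64.b64decode(encoded_key))
            return service_account.Credentials.from_service_account_info(info)
        # ADC resolution order: GOOGLE_APPLICATION_CREDENTIALS env var,
        # gcloud user credentials, then the metadata server on GCP hosts.
        credentials, _project = google.auth.default()
        return credentials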
-icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.png -background: "#FCFDFF" -help: - title: - en_US: Get your Access Details from Google - url: - en_US: https://cloud.google.com/vertex-ai/ -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: vertex_project_id - label: - en_US: Project ID - type: text-input - required: true - placeholder: - en_US: Enter your Google Cloud Project ID - - variable: vertex_location - label: - en_US: Location - type: text-input - required: true - placeholder: - en_US: Enter your Google Cloud Location - - variable: vertex_service_account_key - label: - en_US: Service Account Key (Leave blank if you use Application Default Credentials) - type: secret-input - required: false - placeholder: - en_US: Enter your Google Cloud Service Account Key in base64 format diff --git a/api/core/model_runtime/model_providers/volcengine_maas/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg deleted file mode 100644 index 616e90916b..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_en.svg +++ /dev/null @@ -1,23 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg deleted file mode 100644 index 24b92195bd..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_l_zh.svg +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg deleted file mode 100644 index e6454a89b7..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/volcengine_maas/client.py b/api/core/model_runtime/model_providers/volcengine_maas/client.py deleted file mode 100644 index cfe21e4b9f..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/client.py +++ /dev/null @@ -1,216 +0,0 @@ -import re -from collections.abc import Generator -from typing import Optional, cast - -from volcenginesdkarkruntime import Ark -from volcenginesdkarkruntime.types.chat import ( - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionChunk, - ChatCompletionContentPartImageParam, - ChatCompletionContentPartTextParam, - ChatCompletionMessageParam, - ChatCompletionMessageToolCallParam, - ChatCompletionSystemMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionToolParam, - ChatCompletionUserMessageParam, -) -from volcenginesdkarkruntime.types.chat.chat_completion_content_part_image_param import ImageURL -from volcenginesdkarkruntime.types.chat.chat_completion_message_tool_call_param import Function -from volcenginesdkarkruntime.types.create_embedding_response import CreateEmbeddingResponse -from volcenginesdkarkruntime.types.shared_params import 
FunctionDefinition - -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) - -DEFAULT_V2_ENDPOINT = "maas-api.ml-platform-cn-beijing.volces.com" -DEFAULT_V3_ENDPOINT = "https://ark.cn-beijing.volces.com/api/v3" - - -class ArkClientV3: - endpoint_id: Optional[str] = None - ark: Optional[Ark] = None - - def __init__(self, *args, **kwargs): - self.ark = Ark(*args, **kwargs) - self.endpoint_id = None - - @staticmethod - def is_legacy(credentials: dict) -> bool: - # match default v2 endpoint - if ArkClientV3.is_compatible_with_legacy(credentials): - return False - # match default v3 endpoint - if credentials.get("api_endpoint_host") == DEFAULT_V3_ENDPOINT: - return False - # only v3 support api_key - if credentials.get("auth_method") == "api_key": - return False - # these cases are considered as sdk v2 - # - modified default v2 endpoint - # - modified default v3 endpoint and auth without api_key - return True - - @staticmethod - def is_compatible_with_legacy(credentials: dict) -> bool: - endpoint = credentials.get("api_endpoint_host") - return endpoint == DEFAULT_V2_ENDPOINT - - @classmethod - def from_credentials(cls, credentials): - """Initialize the client using the credentials provided.""" - args = { - "base_url": credentials["api_endpoint_host"], - "region": credentials["volc_region"], - } - if credentials.get("auth_method") == "api_key": - args = { - **args, - "api_key": credentials["volc_api_key"], - } - else: - args = { - **args, - "ak": credentials["volc_access_key_id"], - "sk": credentials["volc_secret_access_key"], - } - - if cls.is_compatible_with_legacy(credentials): - args = {**args, "base_url": DEFAULT_V3_ENDPOINT} - - client = ArkClientV3(**args) - client.endpoint_id = credentials["endpoint_id"] - return client - - @staticmethod - def convert_prompt_message(message: PromptMessage) -> ChatCompletionMessageParam: - """Converts a PromptMessage to a ChatCompletionMessageParam""" - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - content = message.content - else: - content = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - content.append( - ChatCompletionContentPartTextParam( - text=message_content.text, - type="text", - ) - ) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data) - content.append( - ChatCompletionContentPartImageParam( - image_url=ImageURL( - url=image_data, - detail=message_content.detail.value, - ), - type="image_url", - ) - ) - message_dict = ChatCompletionUserMessageParam(role="user", content=content) - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = ChatCompletionAssistantMessageParam( - content=message.content, - role="assistant", - tool_calls=None - if not message.tool_calls - else [ - ChatCompletionMessageToolCallParam( - id=call.id, - function=Function(name=call.function.name, arguments=call.function.arguments), - type="function", - ) - for call in message.tool_calls - ], - ) - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = 
ChatCompletionSystemMessageParam(content=message.content, role="system") - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = ChatCompletionToolMessageParam( - content=message.content, role="tool", tool_call_id=message.tool_call_id - ) - else: - raise ValueError(f"Got unknown PromptMessage type {message}") - - return message_dict - - @staticmethod - def _convert_tool_prompt(message: PromptMessageTool) -> ChatCompletionToolParam: - return ChatCompletionToolParam( - type="function", - function=FunctionDefinition( - name=message.name, - description=message.description, - parameters=message.parameters, - ), - ) - - def chat( - self, - messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - frequency_penalty: Optional[float] = None, - max_tokens: Optional[int] = None, - presence_penalty: Optional[float] = None, - top_p: Optional[float] = None, - temperature: Optional[float] = None, - ) -> ChatCompletion: - """Block chat""" - return self.ark.chat.completions.create( - model=self.endpoint_id, - messages=[self.convert_prompt_message(message) for message in messages], - tools=[self._convert_tool_prompt(tool) for tool in tools] if tools else None, - stop=stop, - frequency_penalty=frequency_penalty, - max_tokens=max_tokens, - presence_penalty=presence_penalty, - top_p=top_p, - temperature=temperature, - ) - - def stream_chat( - self, - messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - frequency_penalty: Optional[float] = None, - max_tokens: Optional[int] = None, - presence_penalty: Optional[float] = None, - top_p: Optional[float] = None, - temperature: Optional[float] = None, - ) -> Generator[ChatCompletionChunk]: - """Stream chat""" - chunks = self.ark.chat.completions.create( - stream=True, - model=self.endpoint_id, - messages=[self.convert_prompt_message(message) for message in messages], - tools=[self._convert_tool_prompt(tool) for tool in tools] if tools else None, - stop=stop, - frequency_penalty=frequency_penalty, - max_tokens=max_tokens, - presence_penalty=presence_penalty, - top_p=top_p, - temperature=temperature, - stream_options={"include_usage": True}, - ) - yield from chunks - - def embeddings(self, texts: list[str]) -> CreateEmbeddingResponse: - return self.ark.embeddings.create(model=self.endpoint_id, input=texts) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py deleted file mode 100644 index 266f1216f8..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/client.py +++ /dev/null @@ -1,123 +0,0 @@ -import re -from collections.abc import Callable, Generator -from typing import cast - -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.model_providers.volcengine_maas.legacy.errors import wrap_error -from core.model_runtime.model_providers.volcengine_maas.legacy.volc_sdk import ChatRole, MaasError, MaasService - - -class 
MaaSClient(MaasService): - def __init__(self, host: str, region: str): - self.endpoint_id = None - super().__init__(host, region) - - def set_endpoint_id(self, endpoint_id: str): - self.endpoint_id = endpoint_id - - @classmethod - def from_credential(cls, credentials: dict) -> "MaaSClient": - host = credentials["api_endpoint_host"] - region = credentials["volc_region"] - ak = credentials["volc_access_key_id"] - sk = credentials["volc_secret_access_key"] - endpoint_id = credentials["endpoint_id"] - - client = cls(host, region) - client.set_endpoint_id(endpoint_id) - client.set_ak(ak) - client.set_sk(sk) - return client - - def chat(self, params: dict, messages: list[PromptMessage], stream=False, **extra_model_kwargs) -> Generator | dict: - req = { - "parameters": params, - "messages": [self.convert_prompt_message_to_maas_message(prompt) for prompt in messages], - **extra_model_kwargs, - } - if not stream: - return super().chat( - self.endpoint_id, - req, - ) - return super().stream_chat( - self.endpoint_id, - req, - ) - - def embeddings(self, texts: list[str]) -> dict: - req = {"input": texts} - return super().embeddings(self.endpoint_id, req) - - @staticmethod - def convert_prompt_message_to_maas_message(message: PromptMessage) -> dict: - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": ChatRole.USER, "content": message.content} - else: - content = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - raise ValueError("Content object type only support image_url") - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - image_data = re.sub(r"^data:image\/[a-zA-Z]+;base64,", "", message_content.data) - content.append( - { - "type": "image_url", - "image_url": { - "url": "", - "image_bytes": image_data, - "detail": message_content.detail, - }, - } - ) - - message_dict = {"role": ChatRole.USER, "content": content} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": ChatRole.ASSISTANT, "content": message.content} - if message.tool_calls: - message_dict["tool_calls"] = [ - {"name": call.function.name, "arguments": call.function.arguments} for call in message.tool_calls - ] - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": ChatRole.SYSTEM, "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = {"role": ChatRole.FUNCTION, "content": message.content, "name": message.tool_call_id} - else: - raise ValueError(f"Got unknown PromptMessage type {message}") - - return message_dict - - @staticmethod - def wrap_exception(fn: Callable[[], dict | Generator]) -> dict | Generator: - try: - resp = fn() - except MaasError as e: - raise wrap_error(e) - - return resp - - @staticmethod - def transform_tool_prompt_to_maas_config(tool: PromptMessageTool): - return { - "type": "function", - "function": { - "name": tool.name, - "description": tool.description, - "parameters": tool.parameters, - }, - } diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/errors.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/errors.py deleted file mode 100644 index 91dbe21a61..0000000000 --- 
a/api/core/model_runtime/model_providers/volcengine_maas/legacy/errors.py +++ /dev/null @@ -1,156 +0,0 @@ -from core.model_runtime.model_providers.volcengine_maas.legacy.volc_sdk import MaasError - - -class ClientSDKRequestError(MaasError): - pass - - -class SignatureDoesNotMatchError(MaasError): - pass - - -class RequestTimeoutError(MaasError): - pass - - -class ServiceConnectionTimeoutError(MaasError): - pass - - -class MissingAuthenticationHeaderError(MaasError): - pass - - -class AuthenticationHeaderIsInvalidError(MaasError): - pass - - -class InternalServiceError(MaasError): - pass - - -class MissingParameterError(MaasError): - pass - - -class InvalidParameterError(MaasError): - pass - - -class AuthenticationExpireError(MaasError): - pass - - -class EndpointIsInvalidError(MaasError): - pass - - -class EndpointIsNotEnableError(MaasError): - pass - - -class ModelNotSupportStreamModeError(MaasError): - pass - - -class ReqTextExistRiskError(MaasError): - pass - - -class RespTextExistRiskError(MaasError): - pass - - -class EndpointRateLimitExceededError(MaasError): - pass - - -class ServiceConnectionRefusedError(MaasError): - pass - - -class ServiceConnectionClosedError(MaasError): - pass - - -class UnauthorizedUserForEndpointError(MaasError): - pass - - -class InvalidEndpointWithNoURLError(MaasError): - pass - - -class EndpointAccountRpmRateLimitExceededError(MaasError): - pass - - -class EndpointAccountTpmRateLimitExceededError(MaasError): - pass - - -class ServiceResourceWaitQueueFullError(MaasError): - pass - - -class EndpointIsPendingError(MaasError): - pass - - -class ServiceNotOpenError(MaasError): - pass - - -AuthErrors = { - "SignatureDoesNotMatch": SignatureDoesNotMatchError, - "MissingAuthenticationHeader": MissingAuthenticationHeaderError, - "AuthenticationHeaderIsInvalid": AuthenticationHeaderIsInvalidError, - "AuthenticationExpire": AuthenticationExpireError, - "UnauthorizedUserForEndpoint": UnauthorizedUserForEndpointError, -} - -BadRequestErrors = { - "MissingParameter": MissingParameterError, - "InvalidParameter": InvalidParameterError, - "EndpointIsInvalid": EndpointIsInvalidError, - "EndpointIsNotEnable": EndpointIsNotEnableError, - "ModelNotSupportStreamMode": ModelNotSupportStreamModeError, - "ReqTextExistRisk": ReqTextExistRiskError, - "RespTextExistRisk": RespTextExistRiskError, - "InvalidEndpointWithNoURL": InvalidEndpointWithNoURLError, - "ServiceNotOpen": ServiceNotOpenError, -} - -RateLimitErrors = { - "EndpointRateLimitExceeded": EndpointRateLimitExceededError, - "EndpointAccountRpmRateLimitExceeded": EndpointAccountRpmRateLimitExceededError, - "EndpointAccountTpmRateLimitExceeded": EndpointAccountTpmRateLimitExceededError, -} - -ServerUnavailableErrors = { - "InternalServiceError": InternalServiceError, - "EndpointIsPending": EndpointIsPendingError, - "ServiceResourceWaitQueueFull": ServiceResourceWaitQueueFullError, -} - -ConnectionErrors = { - "ClientSDKRequestError": ClientSDKRequestError, - "RequestTimeout": RequestTimeoutError, - "ServiceConnectionTimeout": ServiceConnectionTimeoutError, - "ServiceConnectionRefused": ServiceConnectionRefusedError, - "ServiceConnectionClosed": ServiceConnectionClosedError, -} - -ErrorCodeMap = { - **AuthErrors, - **BadRequestErrors, - **RateLimitErrors, - **ServerUnavailableErrors, - **ConnectionErrors, -} - - -def wrap_error(e: MaasError) -> Exception: - if ErrorCodeMap.get(e.code): - return ErrorCodeMap.get(e.code)(e.code_n, e.code, e.message, e.req_id) - return e diff --git 
a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/__init__.py deleted file mode 100644 index 8b3eb157be..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .common import ChatRole -from .maas import MaasError, MaasService - -__all__ = ["MaasService", "ChatRole", "MaasError"] diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/__init__.py deleted file mode 100644 index 8b13789179..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py deleted file mode 100644 index c22bf8e76d..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/auth.py +++ /dev/null @@ -1,159 +0,0 @@ -# coding : utf-8 -import datetime -from itertools import starmap - -import pytz - -from .util import Util - - -class MetaData: - def __init__(self): - self.algorithm = "" - self.credential_scope = "" - self.signed_headers = "" - self.date = "" - self.region = "" - self.service = "" - - def set_date(self, date): - self.date = date - - def set_service(self, service): - self.service = service - - def set_region(self, region): - self.region = region - - def set_algorithm(self, algorithm): - self.algorithm = algorithm - - def set_credential_scope(self, credential_scope): - self.credential_scope = credential_scope - - def set_signed_headers(self, signed_headers): - self.signed_headers = signed_headers - - -class SignResult: - def __init__(self): - self.xdate = "" - self.xCredential = "" - self.xAlgorithm = "" - self.xSignedHeaders = "" - self.xSignedQueries = "" - self.xSignature = "" - self.xContextSha256 = "" - self.xSecurityToken = "" - - self.authorization = "" - - def __str__(self): - return "\n".join(list(starmap("{}:{}".format, self.__dict__.items()))) - - -class Credentials: - def __init__(self, ak, sk, service, region, session_token=""): - self.ak = ak - self.sk = sk - self.service = service - self.region = region - self.session_token = session_token - - def set_ak(self, ak): - self.ak = ak - - def set_sk(self, sk): - self.sk = sk - - def set_session_token(self, session_token): - self.session_token = session_token - - -class Signer: - @staticmethod - def sign(request, credentials): - if request.path == "": - request.path = "/" - if request.method != "GET" and "Content-Type" not in request.headers: - request.headers["Content-Type"] = "application/x-www-form-urlencoded; charset=utf-8" - - format_date = Signer.get_current_format_date() - request.headers["X-Date"] = format_date - if credentials.session_token != "": - request.headers["X-Security-Token"] = credentials.session_token - - md = MetaData() - md.set_algorithm("HMAC-SHA256") - md.set_service(credentials.service) - md.set_region(credentials.region) - md.set_date(format_date[:8]) - - hashed_canon_req = Signer.hashed_canonical_request_v4(request, md) - md.set_credential_scope("/".join([md.date, md.region, md.service, "request"])) - - signing_str = "\n".join([md.algorithm, format_date, md.credential_scope, hashed_canon_req]) - signing_key = 
Signer.get_signing_secret_key_v4(credentials.sk, md.date, md.region, md.service) - sign = Util.to_hex(Util.hmac_sha256(signing_key, signing_str)) - request.headers["Authorization"] = Signer.build_auth_header_v4(sign, md, credentials) - - @staticmethod - def hashed_canonical_request_v4(request, meta): - body_hash = Util.sha256(request.body) - request.headers["X-Content-Sha256"] = body_hash - - signed_headers = {} - for key in request.headers: - if key in {"Content-Type", "Content-Md5", "Host"} or key.startswith("X-"): - signed_headers[key.lower()] = request.headers[key] - - if "host" in signed_headers: - v = signed_headers["host"] - if v.find(":") != -1: - split = v.split(":") - port = split[1] - if str(port) == "80" or str(port) == "443": - signed_headers["host"] = split[0] - - signed_str = "" - for key in sorted(signed_headers.keys()): - signed_str += key + ":" + signed_headers[key] + "\n" - - meta.set_signed_headers(";".join(sorted(signed_headers.keys()))) - - canonical_request = "\n".join( - [ - request.method, - Util.norm_uri(request.path), - Util.norm_query(request.query), - signed_str, - meta.signed_headers, - body_hash, - ] - ) - - return Util.sha256(canonical_request) - - @staticmethod - def get_signing_secret_key_v4(sk, date, region, service): - date = Util.hmac_sha256(bytes(sk, encoding="utf-8"), date) - region = Util.hmac_sha256(date, region) - service = Util.hmac_sha256(region, service) - return Util.hmac_sha256(service, "request") - - @staticmethod - def build_auth_header_v4(signature, meta, credentials): - credential = credentials.ak + "/" + meta.credential_scope - return ( - meta.algorithm - + " Credential=" - + credential - + ", SignedHeaders=" - + meta.signed_headers - + ", Signature=" - + signature - ) - - @staticmethod - def get_current_format_date(): - return datetime.datetime.now(tz=pytz.timezone("UTC")).strftime("%Y%m%dT%H%M%SZ") diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/service.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/service.py deleted file mode 100644 index 33c41f3eb3..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/service.py +++ /dev/null @@ -1,216 +0,0 @@ -import json -from collections import OrderedDict -from urllib.parse import urlencode - -import requests - -from .auth import Signer - -VERSION = "v1.0.137" - - -class Service: - def __init__(self, service_info, api_info): - self.service_info = service_info - self.api_info = api_info - self.session = requests.session() - - def set_ak(self, ak): - self.service_info.credentials.set_ak(ak) - - def set_sk(self, sk): - self.service_info.credentials.set_sk(sk) - - def set_session_token(self, session_token): - self.service_info.credentials.set_session_token(session_token) - - def set_host(self, host): - self.service_info.host = host - - def set_scheme(self, scheme): - self.service_info.scheme = scheme - - def get(self, api, params, doseq=0): - if api not in self.api_info: - raise Exception("no such api") - api_info = self.api_info[api] - - r = self.prepare_request(api_info, params, doseq) - - Signer.sign(r, self.service_info.credentials) - - url = r.build(doseq) - resp = self.session.get( - url, headers=r.headers, timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout) - ) - if resp.status_code == 200: - return resp.text - else: - raise Exception(resp.text) - - def post(self, api, params, form): - if api not in self.api_info: - raise Exception("no such api") - 
api_info = self.api_info[api] - r = self.prepare_request(api_info, params) - r.headers["Content-Type"] = "application/x-www-form-urlencoded" - r.form = self.merge(api_info.form, form) - r.body = urlencode(r.form, True) - Signer.sign(r, self.service_info.credentials) - - url = r.build() - - resp = self.session.post( - url, - headers=r.headers, - data=r.form, - timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout), - ) - if resp.status_code == 200: - return resp.text - else: - raise Exception(resp.text) - - def json(self, api, params, body): - if api not in self.api_info: - raise Exception("no such api") - api_info = self.api_info[api] - r = self.prepare_request(api_info, params) - r.headers["Content-Type"] = "application/json" - r.body = body - - Signer.sign(r, self.service_info.credentials) - - url = r.build() - resp = self.session.post( - url, - headers=r.headers, - data=r.body, - timeout=(self.service_info.connection_timeout, self.service_info.socket_timeout), - ) - if resp.status_code == 200: - return json.dumps(resp.json()) - else: - raise Exception(resp.text.encode("utf-8")) - - def put(self, url, file_path, headers): - with open(file_path, "rb") as f: - resp = self.session.put(url, headers=headers, data=f) - if resp.status_code == 200: - return True, resp.text.encode("utf-8") - else: - return False, resp.text.encode("utf-8") - - def put_data(self, url, data, headers): - resp = self.session.put(url, headers=headers, data=data) - if resp.status_code == 200: - return True, resp.text.encode("utf-8") - else: - return False, resp.text.encode("utf-8") - - def prepare_request(self, api_info, params, doseq=0): - for key in params: - if type(params[key]) == int or type(params[key]) == float or type(params[key]) == bool: - params[key] = str(params[key]) - elif type(params[key]) == list: - if not doseq: - params[key] = ",".join(params[key]) - - connection_timeout = self.service_info.connection_timeout - socket_timeout = self.service_info.socket_timeout - - r = Request() - r.set_schema(self.service_info.scheme) - r.set_method(api_info.method) - r.set_connection_timeout(connection_timeout) - r.set_socket_timeout(socket_timeout) - - headers = self.merge(api_info.header, self.service_info.header) - headers["Host"] = self.service_info.host - headers["User-Agent"] = "volc-sdk-python/" + VERSION - r.set_headers(headers) - - query = self.merge(api_info.query, params) - r.set_query(query) - - r.set_host(self.service_info.host) - r.set_path(api_info.path) - - return r - - @staticmethod - def merge(param1, param2): - od = OrderedDict() - for key in param1: - od[key] = param1[key] - - for key in param2: - od[key] = param2[key] - - return od - - -class Request: - def __init__(self): - self.schema = "" - self.method = "" - self.host = "" - self.path = "" - self.headers = OrderedDict() - self.query = OrderedDict() - self.body = "" - self.form = {} - self.connection_timeout = 0 - self.socket_timeout = 0 - - def set_schema(self, schema): - self.schema = schema - - def set_method(self, method): - self.method = method - - def set_host(self, host): - self.host = host - - def set_path(self, path): - self.path = path - - def set_headers(self, headers): - self.headers = headers - - def set_query(self, query): - self.query = query - - def set_body(self, body): - self.body = body - - def set_connection_timeout(self, connection_timeout): - self.connection_timeout = connection_timeout - - def set_socket_timeout(self, socket_timeout): - self.socket_timeout = socket_timeout - - def build(self, 
doseq=0): - return self.schema + "://" + self.host + self.path + "?" + urlencode(self.query, doseq) - - -class ServiceInfo: - def __init__(self, host, header, credentials, connection_timeout, socket_timeout, scheme="http"): - self.host = host - self.header = header - self.credentials = credentials - self.connection_timeout = connection_timeout - self.socket_timeout = socket_timeout - self.scheme = scheme - - -class ApiInfo: - def __init__(self, method, path, query, form, header): - self.method = method - self.path = path - self.query = query - self.form = form - self.header = header - - def __str__(self): - return "method: " + self.method + ", path: " + self.path diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py deleted file mode 100644 index 178d63714e..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/base/util.py +++ /dev/null @@ -1,44 +0,0 @@ -import hashlib -import hmac -import operator -from functools import reduce -from urllib.parse import quote - - -class Util: - @staticmethod - def norm_uri(path): - return quote(path).replace("%2F", "/").replace("+", "%20") - - @staticmethod - def norm_query(params): - query = "" - for key in sorted(params.keys()): - if type(params[key]) == list: - for k in params[key]: - query = query + quote(key, safe="-_.~") + "=" + quote(k, safe="-_.~") + "&" - else: - query = query + quote(key, safe="-_.~") + "=" + quote(params[key], safe="-_.~") + "&" - query = query[:-1] - return query.replace("+", "%20") - - @staticmethod - def hmac_sha256(key, content): - return hmac.new(key, bytes(content, encoding="utf-8"), hashlib.sha256).digest() - - @staticmethod - def sha256(content): - if isinstance(content, str) is True: - return hashlib.sha256(content.encode("utf-8")).hexdigest() - else: - return hashlib.sha256(content).hexdigest() - - @staticmethod - def to_hex(content): - lst = [] - for ch in content: - hv = hex(ch).replace("0x", "") - if len(hv) == 1: - hv = "0" + hv - lst.append(hv) - return reduce(operator.add, lst) diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/common.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/common.py deleted file mode 100644 index 3825fd6574..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/common.py +++ /dev/null @@ -1,77 +0,0 @@ -import json -import random -from datetime import datetime - - -class ChatRole: - USER = "user" - ASSISTANT = "assistant" - SYSTEM = "system" - FUNCTION = "function" - - -class _Dict(dict): - __setattr__ = dict.__setitem__ - __getattr__ = dict.__getitem__ - - def __missing__(self, key): - return None - - -def dict_to_object(dict_obj): - # 支持嵌套类型 - if isinstance(dict_obj, list): - insts = [] - for i in dict_obj: - insts.append(dict_to_object(i)) - return insts - - if isinstance(dict_obj, dict): - inst = _Dict() - for k, v in dict_obj.items(): - inst[k] = dict_to_object(v) - return inst - - return dict_obj - - -def json_to_object(json_str, req_id=None): - obj = dict_to_object(json.loads(json_str)) - if obj and isinstance(obj, dict) and req_id: - obj["req_id"] = req_id - return obj - - -def gen_req_id(): - return datetime.now().strftime("%Y%m%d%H%M%S") + format(random.randint(0, 2**64 - 1), "020X") - - -class SSEDecoder: - def __init__(self, source): - self.source = source - - def _read(self): - data = b"" - for chunk in self.source: - for 
line in chunk.splitlines(True): - data += line - if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")): - yield data - data = b"" - if data: - yield data - - def next(self): - for chunk in self._read(): - for line in chunk.splitlines(): - # skip comment - if line.startswith(b":"): - continue - - if b":" in line: - field, value = line.split(b":", 1) - else: - field, value = line, b"" - - if field == b"data" and len(value) > 0: - yield value diff --git a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/maas.py b/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/maas.py deleted file mode 100644 index a3836685f1..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/legacy/volc_sdk/maas.py +++ /dev/null @@ -1,198 +0,0 @@ -import copy -import json -from collections.abc import Iterator - -from .base.auth import Credentials, Signer -from .base.service import ApiInfo, Service, ServiceInfo -from .common import SSEDecoder, dict_to_object, gen_req_id, json_to_object - - -class MaasService(Service): - def __init__(self, host, region, connection_timeout=60, socket_timeout=60): - service_info = self.get_service_info(host, region, connection_timeout, socket_timeout) - self._apikey = None - api_info = self.get_api_info() - super().__init__(service_info, api_info) - - def set_apikey(self, apikey): - self._apikey = apikey - - @staticmethod - def get_service_info(host, region, connection_timeout, socket_timeout): - service_info = ServiceInfo( - host, - {"Accept": "application/json"}, - Credentials("", "", "ml_maas", region), - connection_timeout, - socket_timeout, - "https", - ) - return service_info - - @staticmethod - def get_api_info(): - api_info = { - "chat": ApiInfo("POST", "/api/v2/endpoint/{endpoint_id}/chat", {}, {}, {}), - "embeddings": ApiInfo("POST", "/api/v2/endpoint/{endpoint_id}/embeddings", {}, {}, {}), - } - return api_info - - def chat(self, endpoint_id, req): - req["stream"] = False - return self._request(endpoint_id, "chat", req) - - def stream_chat(self, endpoint_id, req): - req_id = gen_req_id() - self._validate("chat", req_id) - apikey = self._apikey - - try: - req["stream"] = True - res = self._call(endpoint_id, "chat", req_id, {}, json.dumps(req).encode("utf-8"), apikey, stream=True) - - decoder = SSEDecoder(res) - - def iter_fn(): - for data in decoder.next(): - if data == b"[DONE]": - return - - try: - res = json_to_object(str(data, encoding="utf-8"), req_id=req_id) - except Exception: - raise - - if res.error is not None and res.error.code_n != 0: - raise MaasError( - res.error.code_n, - res.error.code, - res.error.message, - req_id, - ) - yield res - - return iter_fn() - except MaasError: - raise - except Exception as e: - raise new_client_sdk_request_error(str(e)) - - def embeddings(self, endpoint_id, req): - return self._request(endpoint_id, "embeddings", req) - - def _request(self, endpoint_id, api, req, params={}): - req_id = gen_req_id() - - self._validate(api, req_id) - - apikey = self._apikey - - try: - res = self._call(endpoint_id, api, req_id, params, json.dumps(req).encode("utf-8"), apikey) - resp = dict_to_object(res.json()) - if resp and isinstance(resp, dict): - resp["req_id"] = req_id - return resp - - except MaasError as e: - raise e - except Exception as e: - raise new_client_sdk_request_error(str(e), req_id) - - def _validate(self, api, req_id): - credentials_exist = ( - self.service_info.credentials is not None - and self.service_info.credentials.sk is not None - and self.service_info.credentials.ak is not 
None - ) - - if not self._apikey and not credentials_exist: - raise new_client_sdk_request_error("no valid credential", req_id) - - if api not in self.api_info: - raise new_client_sdk_request_error("no such api", req_id) - - def _call(self, endpoint_id, api, req_id, params, body, apikey=None, stream=False): - api_info = copy.deepcopy(self.api_info[api]) - api_info.path = api_info.path.format(endpoint_id=endpoint_id) - - r = self.prepare_request(api_info, params) - r.headers["x-tt-logid"] = req_id - r.headers["Content-Type"] = "application/json" - r.body = body - - if apikey is None: - Signer.sign(r, self.service_info.credentials) - elif apikey is not None: - r.headers["Authorization"] = "Bearer " + apikey - - url = r.build() - res = self.session.post( - url, - headers=r.headers, - data=r.body, - timeout=( - self.service_info.connection_timeout, - self.service_info.socket_timeout, - ), - stream=stream, - ) - - if res.status_code != 200: - raw = res.text.encode() - res.close() - try: - resp = json_to_object(str(raw, encoding="utf-8"), req_id=req_id) - except Exception: - raise new_client_sdk_request_error(raw, req_id) - - if resp.error: - raise MaasError(resp.error.code_n, resp.error.code, resp.error.message, req_id) - else: - raise new_client_sdk_request_error(resp, req_id) - - return res - - -class MaasError(Exception): - def __init__(self, code_n, code, message, req_id): - self.code_n = code_n - self.code = code - self.message = message - self.req_id = req_id - - def __str__(self): - return ( - "Detailed exception information is listed below.\n" - + "req_id: {}\n" - + "code_n: {}\n" - + "code: {}\n" - + "message: {}" - ).format(self.req_id, self.code_n, self.code, self.message) - - -def new_client_sdk_request_error(raw, req_id=""): - return MaasError(1709701, "ClientSDKRequestError", "MaaS SDK request error: {}".format(raw), req_id) - - -class BinaryResponseContent: - def __init__(self, response, request_id) -> None: - self.response = response - self.request_id = request_id - - def stream_to_file(self, file: str) -> None: - is_first = True - error_bytes = b"" - with open(file, mode="wb") as f: - for data in self.response: - if len(error_bytes) > 0 or (is_first and '"error":' in str(data)): - error_bytes += data - else: - f.write(data) - - if len(error_bytes) > 0: - resp = json_to_object(str(error_bytes, encoding="utf-8"), req_id=self.request_id) - raise MaasError(resp.error.code_n, resp.error.code, resp.error.message, self.request_id) - - def iter_bytes(self) -> Iterator[bytes]: - yield from self.response diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py deleted file mode 100644 index dec6c9d789..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py +++ /dev/null @@ -1,388 +0,0 @@ -import logging -from collections.abc import Generator - -from volcenginesdkarkruntime.types.chat import ChatCompletion, ChatCompletionChunk - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - UserPromptMessage, -) -from 
core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.volcengine_maas.client import ArkClientV3 -from core.model_runtime.model_providers.volcengine_maas.legacy.client import MaaSClient -from core.model_runtime.model_providers.volcengine_maas.legacy.errors import ( - AuthErrors, - BadRequestErrors, - ConnectionErrors, - MaasError, - RateLimitErrors, - ServerUnavailableErrors, -) -from core.model_runtime.model_providers.volcengine_maas.llm.models import ( - get_model_config, - get_v2_req_params, - get_v3_req_params, -) - -logger = logging.getLogger(__name__) - - -class VolcengineMaaSLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - if ArkClientV3.is_legacy(credentials): - return self._generate_v2(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - return self._generate_v3(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate credentials - """ - if ArkClientV3.is_legacy(credentials): - return self._validate_credentials_v2(credentials) - return self._validate_credentials_v3(credentials) - - @staticmethod - def _validate_credentials_v2(credentials: dict) -> None: - client = MaaSClient.from_credential(credentials) - try: - client.chat( - { - "max_new_tokens": 16, - "temperature": 0.7, - "top_p": 0.9, - "top_k": 15, - }, - [UserPromptMessage(content="ping\nAnswer: ")], - ) - except MaasError as e: - raise CredentialsValidateFailedError(e.message) - - @staticmethod - def _validate_credentials_v3(credentials: dict) -> None: - client = ArkClientV3.from_credentials(credentials) - try: - client.chat( - max_tokens=16, - temperature=0.7, - top_p=0.9, - messages=[UserPromptMessage(content="ping\nAnswer: ")], - ) - except Exception as e: - raise CredentialsValidateFailedError(e) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - if ArkClientV3.is_legacy(credentials): - return self._get_num_tokens_v2(prompt_messages) - return self._get_num_tokens_v3(prompt_messages) - - def _get_num_tokens_v2(self, messages: list[PromptMessage]) -> int: - if len(messages) == 0: - return 0 - num_tokens = 0 - messages_dict = [MaaSClient.convert_prompt_message_to_maas_message(m) for m in messages] - for message in messages_dict: - for key, value in message.items(): - num_tokens += self._get_num_tokens_by_gpt2(str(key)) - num_tokens += self._get_num_tokens_by_gpt2(str(value)) - - return num_tokens - - def _get_num_tokens_v3(self, messages: list[PromptMessage]) -> int: - if len(messages) == 0: - return 0 - num_tokens = 0 - messages_dict = [ArkClientV3.convert_prompt_message(m) for m in 
messages] - for message in messages_dict: - for key, value in message.items(): - num_tokens += self._get_num_tokens_by_gpt2(str(key)) - num_tokens += self._get_num_tokens_by_gpt2(str(value)) - - return num_tokens - - def _generate_v2( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - client = MaaSClient.from_credential(credentials) - req_params = get_v2_req_params(credentials, model_parameters, stop) - extra_model_kwargs = {} - if tools: - extra_model_kwargs["tools"] = [MaaSClient.transform_tool_prompt_to_maas_config(tool) for tool in tools] - resp = MaaSClient.wrap_exception(lambda: client.chat(req_params, prompt_messages, stream, **extra_model_kwargs)) - - def _handle_stream_chat_response() -> Generator: - for index, r in enumerate(resp): - choices = r["choices"] - if not choices: - continue - choice = choices[0] - message = choice["message"] - usage = None - if r.get("usage"): - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=r["usage"]["prompt_tokens"], - completion_tokens=r["usage"]["completion_tokens"], - ) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=AssistantPromptMessage(content=message["content"] or "", tool_calls=[]), - usage=usage, - finish_reason=choice.get("finish_reason"), - ), - ) - - def _handle_chat_response() -> LLMResult: - choices = resp["choices"] - if not choices: - raise ValueError("No choices found") - - choice = choices[0] - message = choice["message"] - - # parse tool calls - tool_calls = [] - if message["tool_calls"]: - for call in message["tool_calls"]: - tool_call = AssistantPromptMessage.ToolCall( - id=call["function"]["name"], - type=call["type"], - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=call["function"]["name"], arguments=call["function"]["arguments"] - ), - ) - tool_calls.append(tool_call) - - usage = resp["usage"] - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage( - content=message["content"] or "", - tool_calls=tool_calls, - ), - usage=self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=usage["prompt_tokens"], - completion_tokens=usage["completion_tokens"], - ), - ) - - if not stream: - return _handle_chat_response() - return _handle_stream_chat_response() - - def _generate_v3( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - client = ArkClientV3.from_credentials(credentials) - req_params = get_v3_req_params(credentials, model_parameters, stop) - if tools: - req_params["tools"] = tools - - def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Generator: - for chunk in chunks: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage( - content=chunk.choices[0].delta.content if chunk.choices else "", tool_calls=[] - ), - usage=self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=chunk.usage.prompt_tokens, - 
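- # chunk.usage is typically present only on the final streamed chunk; the
- # trailing "if chunk.usage else None" keeps usage empty for interim chunks.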
completion_tokens=chunk.usage.completion_tokens, - ) - if chunk.usage - else None, - finish_reason=chunk.choices[0].finish_reason if chunk.choices else None, - ), - ) - - def _handle_chat_response(resp: ChatCompletion) -> LLMResult: - choice = resp.choices[0] - message = choice.message - # parse tool calls - tool_calls = [] - if message.tool_calls: - for call in message.tool_calls: - tool_call = AssistantPromptMessage.ToolCall( - id=call.id, - type=call.type, - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=call.function.name, arguments=call.function.arguments - ), - ) - tool_calls.append(tool_call) - - usage = resp.usage - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage( - content=message.content or "", - tool_calls=tool_calls, - ), - usage=self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=usage.prompt_tokens, - completion_tokens=usage.completion_tokens, - ), - ) - - if not stream: - resp = client.chat(prompt_messages, **req_params) - return _handle_chat_response(resp) - - chunks = client.stream_chat(prompt_messages, **req_params) - return _handle_stream_chat_response(chunks) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - model_config = get_model_config(credentials) - - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="top_k", type=ParameterType.INT, min=1, default=1, label=I18nObject(zh_Hans="Top K", en_US="Top K") - ), - ParameterRule( - name="presence_penalty", - type=ParameterType.FLOAT, - use_template="presence_penalty", - label=I18nObject( - en_US="Presence Penalty", - zh_Hans="存在惩罚", - ), - min=-2.0, - max=2.0, - ), - ParameterRule( - name="frequency_penalty", - type=ParameterType.FLOAT, - use_template="frequency_penalty", - label=I18nObject( - en_US="Frequency Penalty", - zh_Hans="频率惩罚", - ), - min=-2.0, - max=2.0, - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - max=model_config.properties.max_tokens, - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ] - - model_properties = {} - model_properties[ModelPropertyKey.CONTEXT_SIZE] = model_config.properties.context_size - model_properties[ModelPropertyKey.MODE] = model_config.properties.mode.value - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - model_properties=model_properties, - parameter_rules=rules, - features=model_config.features, - ) - - return entity - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
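- Matching is done by exception type: an SDK exception listed on the right-hand
- side is re-raised as the corresponding unified InvokeError subclass on the left.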
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: ConnectionErrors.values(), - InvokeServerUnavailableError: ServerUnavailableErrors.values(), - InvokeRateLimitError: RateLimitErrors.values(), - InvokeAuthorizationError: AuthErrors.values(), - InvokeBadRequestError: BadRequestErrors.values(), - } diff --git a/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py b/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py deleted file mode 100644 index d8be14b024..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/llm/models.py +++ /dev/null @@ -1,142 +0,0 @@ -from pydantic import BaseModel - -from core.model_runtime.entities.llm_entities import LLMMode -from core.model_runtime.entities.model_entities import ModelFeature - - -class ModelProperties(BaseModel): - context_size: int - max_tokens: int - mode: LLMMode - - -class ModelConfig(BaseModel): - properties: ModelProperties - features: list[ModelFeature] - - -configs: dict[str, ModelConfig] = { - "Doubao-pro-4k": ModelConfig( - properties=ModelProperties(context_size=4096, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Doubao-lite-4k": ModelConfig( - properties=ModelProperties(context_size=4096, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Doubao-pro-32k": ModelConfig( - properties=ModelProperties(context_size=32768, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Doubao-lite-32k": ModelConfig( - properties=ModelProperties(context_size=32768, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Doubao-pro-128k": ModelConfig( - properties=ModelProperties(context_size=131072, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Doubao-lite-128k": ModelConfig( - properties=ModelProperties(context_size=131072, max_tokens=4096, mode=LLMMode.CHAT), features=[] - ), - "Skylark2-pro-4k": ModelConfig( - properties=ModelProperties(context_size=4096, max_tokens=4096, mode=LLMMode.CHAT), features=[] - ), - "Llama3-8B": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=8192, mode=LLMMode.CHAT), features=[] - ), - "Llama3-70B": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=8192, mode=LLMMode.CHAT), features=[] - ), - "Moonshot-v1-8k": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Moonshot-v1-32k": ModelConfig( - properties=ModelProperties(context_size=32768, max_tokens=16384, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Moonshot-v1-128k": ModelConfig( - properties=ModelProperties(context_size=131072, max_tokens=65536, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "GLM3-130B": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "GLM3-130B-Fin": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=4096, mode=LLMMode.CHAT), - features=[ModelFeature.TOOL_CALL], - ), - "Mistral-7B": ModelConfig( - properties=ModelProperties(context_size=8192, max_tokens=2048, mode=LLMMode.CHAT), features=[] - ), -} - - -def get_model_config(credentials: dict) -> ModelConfig: - base_model = credentials.get("base_model_name", "") - model_configs = configs.get(base_model) - if not model_configs: - return ModelConfig( - properties=ModelProperties( - 
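- # unknown base model: fall back to the context_size / max_tokens / mode
- # values supplied on the credentials form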
context_size=int(credentials.get("context_size", 0)), - max_tokens=int(credentials.get("max_tokens", 0)), - mode=LLMMode.value_of(credentials.get("mode", "chat")), - ), - features=[], - ) - return model_configs - - -def get_v2_req_params(credentials: dict, model_parameters: dict, stop: list[str] | None = None): - req_params = {} - # predefined properties - model_configs = get_model_config(credentials) - if model_configs: - req_params["max_prompt_tokens"] = model_configs.properties.context_size - req_params["max_new_tokens"] = model_configs.properties.max_tokens - - # model parameters - if model_parameters.get("max_tokens"): - req_params["max_new_tokens"] = model_parameters.get("max_tokens") - if model_parameters.get("temperature"): - req_params["temperature"] = model_parameters.get("temperature") - if model_parameters.get("top_p"): - req_params["top_p"] = model_parameters.get("top_p") - if model_parameters.get("top_k"): - req_params["top_k"] = model_parameters.get("top_k") - if model_parameters.get("presence_penalty"): - req_params["presence_penalty"] = model_parameters.get("presence_penalty") - if model_parameters.get("frequency_penalty"): - req_params["frequency_penalty"] = model_parameters.get("frequency_penalty") - - if stop: - req_params["stop"] = stop - - return req_params - - -def get_v3_req_params(credentials: dict, model_parameters: dict, stop: list[str] | None = None): - req_params = {} - # predefined properties - model_configs = get_model_config(credentials) - if model_configs: - req_params["max_tokens"] = model_configs.properties.max_tokens - - # model parameters - if model_parameters.get("max_tokens"): - req_params["max_tokens"] = model_parameters.get("max_tokens") - if model_parameters.get("temperature"): - req_params["temperature"] = model_parameters.get("temperature") - if model_parameters.get("top_p"): - req_params["top_p"] = model_parameters.get("top_p") - if model_parameters.get("presence_penalty"): - req_params["presence_penalty"] = model_parameters.get("presence_penalty") - if model_parameters.get("frequency_penalty"): - req_params["frequency_penalty"] = model_parameters.get("frequency_penalty") - - if stop: - req_params["stop"] = stop - - return req_params diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/__init__.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py deleted file mode 100644 index ce4f0c3ab1..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/models.py +++ /dev/null @@ -1,28 +0,0 @@ -from pydantic import BaseModel - - -class ModelProperties(BaseModel): - context_size: int - max_chunks: int - - -class ModelConfig(BaseModel): - properties: ModelProperties - - -ModelConfigs = { - "Doubao-embedding": ModelConfig(properties=ModelProperties(context_size=4096, max_chunks=32)), -} - - -def get_model_config(credentials: dict) -> ModelConfig: - base_model = credentials.get("base_model_name", "") - model_configs = ModelConfigs.get(base_model) - if not model_configs: - return ModelConfig( - properties=ModelProperties( - context_size=int(credentials.get("context_size", 0)), - max_chunks=int(credentials.get("max_chunks", 0)), - ) - ) - return model_configs diff --git 
a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py deleted file mode 100644 index 10f9be2d08..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class VolcengineMaaSProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml b/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml deleted file mode 100644 index 13e00da76f..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/volcengine_maas.yaml +++ /dev/null @@ -1,266 +0,0 @@ -provider: volcengine_maas -label: - en_US: Volcengine -description: - en_US: Volcengine Ark models. - zh_Hans: 火山方舟提供的模型,例如 Doubao-pro-4k、Doubao-pro-32k 和 Doubao-pro-128k。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg - zh_Hans: icon_l_zh.svg -background: "#F9FAFB" -help: - title: - en_US: Get your Access Key and Secret Access Key from Volcengine Console - zh_Hans: 从火山引擎控制台获取您的 Access Key 和 Secret Access Key - url: - en_US: https://console.volcengine.com/iam/keymanage/ -supported_model_types: - - llm - - text-embedding -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your Model Name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: auth_method - required: true - label: - en_US: Authentication Method - zh_Hans: 鉴权方式 - type: select - default: aksk - options: - - label: - en_US: API Key - value: api_key - - label: - en_US: Access Key / Secret Access Key - value: aksk - placeholder: - en_US: Enter your Authentication Method - zh_Hans: 选择鉴权方式 - - variable: volc_access_key_id - required: true - show_on: - - variable: auth_method - value: aksk - label: - en_US: Access Key - zh_Hans: Access Key - type: secret-input - placeholder: - en_US: Enter your Access Key - zh_Hans: 输入您的 Access Key - - variable: volc_secret_access_key - required: true - show_on: - - variable: auth_method - value: aksk - label: - en_US: Secret Access Key - zh_Hans: Secret Access Key - type: secret-input - placeholder: - en_US: Enter your Secret Access Key - zh_Hans: 输入您的 Secret Access Key - - variable: volc_api_key - required: true - show_on: - - variable: auth_method - value: api_key - label: - en_US: API Key - type: secret-input - placeholder: - en_US: Enter your API Key - zh_Hans: 输入您的 API Key - - variable: volc_region - required: true - label: - en_US: Volcengine Region - zh_Hans: 火山引擎地域 - type: text-input - default: cn-beijing - placeholder: - en_US: Enter Volcengine Region - zh_Hans: 输入火山引擎地域 - - variable: api_endpoint_host - required: true - label: - en_US: API Endpoint Host - zh_Hans: API Endpoint Host - type: text-input - default: https://ark.cn-beijing.volces.com/api/v3 - placeholder: - en_US: Enter your API Endpoint Host - zh_Hans: 输入 API Endpoint Host - - variable: endpoint_id - required: true - label: - en_US: Endpoint ID - zh_Hans: Endpoint ID - type: text-input - placeholder: - en_US: Enter your Endpoint ID - zh_Hans: 输入您的 Endpoint ID - - variable: base_model_name - label: - en_US: Base Model - zh_Hans: 基础模型 - type: select - required: true - options: - - label: - en_US: 
Doubao-pro-4k - value: Doubao-pro-4k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-lite-4k - value: Doubao-lite-4k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-pro-32k - value: Doubao-pro-32k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-lite-32k - value: Doubao-lite-32k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-pro-128k - value: Doubao-pro-128k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-lite-128k - value: Doubao-lite-128k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Llama3-8B - value: Llama3-8B - show_on: - - variable: __model_type - value: llm - - label: - en_US: Llama3-70B - value: Llama3-70B - show_on: - - variable: __model_type - value: llm - - label: - en_US: Moonshot-v1-8k - value: Moonshot-v1-8k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Moonshot-v1-32k - value: Moonshot-v1-32k - show_on: - - variable: __model_type - value: llm - - label: - en_US: Moonshot-v1-128k - value: Moonshot-v1-128k - show_on: - - variable: __model_type - value: llm - - label: - en_US: GLM3-130B - value: GLM3-130B - show_on: - - variable: __model_type - value: llm - - label: - en_US: GLM3-130B-Fin - value: GLM3-130B-Fin - show_on: - - variable: __model_type - value: llm - - label: - en_US: Mistral-7B - value: Mistral-7B - show_on: - - variable: __model_type - value: llm - - label: - en_US: Doubao-embedding - value: Doubao-embedding - show_on: - - variable: __model_type - value: text-embedding - - label: - en_US: Custom - zh_Hans: 自定义 - value: Custom - - variable: mode - required: true - show_on: - - variable: __model_type - value: llm - - variable: base_model_name - value: Custom - label: - zh_Hans: 模型类型 - en_US: Completion Mode - type: select - default: chat - placeholder: - zh_Hans: 选择对话类型 - en_US: Select Completion Mode - options: - - value: completion - label: - en_US: Completion - zh_Hans: 补全 - - value: chat - label: - en_US: Chat - zh_Hans: 对话 - - variable: context_size - required: true - show_on: - - variable: base_model_name - value: Custom - label: - zh_Hans: 模型上下文长度 - en_US: Model Context Size - type: text-input - default: "4096" - placeholder: - zh_Hans: 输入您的模型上下文长度 - en_US: Enter your Model Context Size - - variable: max_tokens - required: true - show_on: - - variable: __model_type - value: llm - - variable: base_model_name - value: Custom - label: - zh_Hans: 最大 token 上限 - en_US: Upper Bound for Max Tokens - default: "4096" - type: text-input - placeholder: - zh_Hans: 输入您的模型最大 token 上限 - en_US: Enter your model Upper Bound for Max Tokens diff --git a/api/core/model_runtime/model_providers/wenxin/__init__.py b/api/core/model_runtime/model_providers/wenxin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_en.png deleted file mode 100644 index fb50487cce..0000000000 Binary files a/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_zh.png b/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_zh.png deleted file mode 100644 index 669d3c7a25..0000000000 Binary files a/api/core/model_runtime/model_providers/wenxin/_assets/icon_l_zh.png and /dev/null differ diff --git 
a/api/core/model_runtime/model_providers/wenxin/_assets/icon_s_en.png b/api/core/model_runtime/model_providers/wenxin/_assets/icon_s_en.png deleted file mode 100644 index 923919958a..0000000000 Binary files a/api/core/model_runtime/model_providers/wenxin/_assets/icon_s_en.png and /dev/null differ diff --git a/api/core/model_runtime/model_providers/wenxin/_common.py b/api/core/model_runtime/model_providers/wenxin/_common.py deleted file mode 100644 index d72d1bd83a..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/_common.py +++ /dev/null @@ -1,194 +0,0 @@ -from datetime import datetime, timedelta -from threading import Lock - -from requests import post - -from core.model_runtime.model_providers.wenxin.wenxin_errors import ( - BadRequestError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - -baidu_access_tokens: dict[str, "BaiduAccessToken"] = {} -baidu_access_tokens_lock = Lock() - - -class BaiduAccessToken: - api_key: str - access_token: str - expires: datetime - - def __init__(self, api_key: str) -> None: - self.api_key = api_key - self.access_token = "" - self.expires = datetime.now() + timedelta(days=3) - - @staticmethod - def _get_access_token(api_key: str, secret_key: str) -> str: - """ - request access token from Baidu - """ - try: - response = post( - url=f"https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={api_key}&client_secret={secret_key}", - headers={"Content-Type": "application/json", "Accept": "application/json"}, - ) - except Exception as e: - raise InvalidAuthenticationError(f"Failed to get access token from Baidu: {e}") - - resp = response.json() - if "error" in resp: - if resp["error"] == "invalid_client": - raise InvalidAPIKeyError(f'Invalid API key or secret key: {resp["error_description"]}') - elif resp["error"] == "unknown_error": - raise InternalServerError(f'Internal server error: {resp["error_description"]}') - elif resp["error"] == "invalid_request": - raise BadRequestError(f'Bad request: {resp["error_description"]}') - elif resp["error"] == "rate_limit_exceeded": - raise RateLimitReachedError(f'Rate limit reached: {resp["error_description"]}') - else: - raise Exception(f'Unknown error: {resp["error_description"]}') - - return resp["access_token"] - - @staticmethod - def get_access_token(api_key: str, secret_key: str) -> "BaiduAccessToken": - """ - LLM from Baidu requires access token to invoke the API. - however, we have api_key and secret_key, and access token is valid for 30 days. - so we can cache the access token for 3 days. (avoid memory leak) - - it may be more efficient to use a ticker to refresh access token, but it will cause - more complexity, so we just refresh access tokens when get_access_token is called. 
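- each cache entry expires after 3 days and is simply re-requested on demand,
- well before the 30-day token lifetime, which keeps the cache bounded.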
- """ - - # loop up cache, remove expired access token - baidu_access_tokens_lock.acquire() - now = datetime.now() - for key in list(baidu_access_tokens.keys()): - token = baidu_access_tokens[key] - if token.expires < now: - baidu_access_tokens.pop(key) - - if api_key not in baidu_access_tokens: - # if access token not in cache, request it - token = BaiduAccessToken(api_key) - baidu_access_tokens[api_key] = token - try: - # try to get access token - token_str = BaiduAccessToken._get_access_token(api_key, secret_key) - finally: - # release it to enhance performance - # btw, _get_access_token will raise exception if failed, release lock here to avoid deadlock - baidu_access_tokens_lock.release() - token.access_token = token_str - token.expires = now + timedelta(days=3) - return token - else: - # if access token in cache, return it - token = baidu_access_tokens[api_key] - baidu_access_tokens_lock.release() - return token - - -class _CommonWenxin: - api_bases = { - "ernie-bot": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-4k-0205", - "ernie-bot-4": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro", - "ernie-bot-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions", - "ernie-bot-turbo": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant", - "ernie-3.5-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions", - "ernie-3.5-8k-0205": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-8k-0205", - "ernie-3.5-8k-1222": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-8k-1222", - "ernie-3.5-4k-0205": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-4k-0205", - "ernie-3.5-128k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-128k", - "ernie-4.0-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro", - "ernie-4.0-8k-latest": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro", - "ernie-speed-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed", - "ernie-speed-128k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k", - "ernie-speed-appbuilder": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ai_apaas", - "ernie-lite-8k-0922": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant", - "ernie-lite-8k-0308": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k", - "ernie-character-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k", - "ernie-character-8k-0321": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k", - "ernie-4.0-turbo-8k": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k", - "ernie-4.0-turbo-8k-preview": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k-preview", - "yi_34b_chat": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/yi_34b_chat", - "embedding-v1": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/embedding-v1", - "bge-large-en": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/bge_large_en", - "bge-large-zh": "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/bge_large_zh", - "tao-8k": 
"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings/tao_8k", - } - - function_calling_supports = [ - "ernie-bot", - "ernie-bot-8k", - "ernie-3.5-8k", - "ernie-3.5-8k-0205", - "ernie-3.5-8k-1222", - "ernie-3.5-4k-0205", - "ernie-3.5-128k", - "ernie-4.0-8k", - "ernie-4.0-turbo-8k", - "ernie-4.0-turbo-8k-preview", - "yi_34b_chat", - ] - - api_key: str = "" - secret_key: str = "" - - def __init__(self, api_key: str, secret_key: str): - self.api_key = api_key - self.secret_key = secret_key - - @staticmethod - def _to_credential_kwargs(credentials: dict) -> dict: - credentials_kwargs = {"api_key": credentials["api_key"], "secret_key": credentials["secret_key"]} - return credentials_kwargs - - def _handle_error(self, code: int, msg: str): - error_map = { - 1: InternalServerError, - 2: InternalServerError, - 3: BadRequestError, - 4: RateLimitReachedError, - 6: InvalidAuthenticationError, - 13: InvalidAPIKeyError, - 14: InvalidAPIKeyError, - 15: InvalidAPIKeyError, - 17: RateLimitReachedError, - 18: RateLimitReachedError, - 19: RateLimitReachedError, - 100: InvalidAPIKeyError, - 111: InvalidAPIKeyError, - 200: InternalServerError, - 336000: InternalServerError, - 336001: BadRequestError, - 336002: BadRequestError, - 336003: BadRequestError, - 336004: InvalidAuthenticationError, - 336005: InvalidAPIKeyError, - 336006: BadRequestError, - 336007: BadRequestError, - 336008: BadRequestError, - 336100: InternalServerError, - 336101: BadRequestError, - 336102: BadRequestError, - 336103: BadRequestError, - 336104: BadRequestError, - 336105: BadRequestError, - 336200: InternalServerError, - 336303: BadRequestError, - 337006: BadRequestError, - } - - if code in error_map: - raise error_map[code](msg) - else: - raise InternalServerError(f"Unknown error: {msg}") - - def _get_access_token(self) -> str: - token = BaiduAccessToken.get_access_token(self.api_key, self.secret_key) - return token.access_token diff --git a/api/core/model_runtime/model_providers/wenxin/llm/__init__.py b/api/core/model_runtime/model_providers/wenxin/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-128k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-128k.yaml deleted file mode 100644 index b1b1ba1f69..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-128k.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: ernie-3.5-128k -label: - en_US: Ernie-3.5-128K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 4096 - min: 2 - max: 4096 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. 
- required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-4k-0205.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-4k-0205.yaml deleted file mode 100644 index 1e8cf96440..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-4k-0205.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: ernie-3.5-4k-0205 -label: - en_US: Ernie-3.5-4k-0205 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml deleted file mode 100644 index b308abcb32..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-0205.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: ernie-3.5-8k-0205 -label: - en_US: Ernie-3.5-8K-0205 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-1222.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-1222.yaml deleted file mode 100644 index c43588cfe1..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k-1222.yaml +++ /dev/null @@ -1,38 +0,0 @@ -model: ernie-3.5-8k-1222 -label: - en_US: Ernie-3.5-8K-1222 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. 
- required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k.yaml deleted file mode 100644 index 145844a4ff..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-3.5-8k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: ernie-3.5-8k -label: - en_US: Ernie-3.5-8K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k-latest.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k-latest.yaml deleted file mode 100644 index d23ae0dc48..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k-latest.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: ernie-4.0-8k-latest -label: - en_US: Ernie-4.0-8K-Latest -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k.yaml deleted file mode 100644 index 9ebb5c8c4f..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-8k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: ernie-4.0-8k -label: - en_US: Ernie-4.0-8K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. 
- required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview.yaml deleted file mode 100644 index 16df540220..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k-preview.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: ernie-4.0-turbo-8k-preview -label: - en_US: Ernie-4.0-turbo-8k-preview -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml deleted file mode 100644 index 2887a510d0..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-4.0-turbo-8k.yaml +++ /dev/null @@ -1,40 +0,0 @@ -model: ernie-4.0-turbo-8k -label: - en_US: Ernie-4.0-turbo-8K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-4.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-4.yaml deleted file mode 100644 index f352787aec..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-4.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: ernie-bot-4 -label: - en_US: Ernie Bot 4 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4800 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 256 - min: 1 - max: 4800 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. 
- required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-8k.yaml deleted file mode 100644 index fa4b7dd800..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-8k.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: ernie-bot-8k -label: - en_US: Ernie Bot 8k -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 1024 - min: 1 - max: 8000 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. - required: false -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-turbo.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-turbo.yaml deleted file mode 100644 index c94aa2db88..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot-turbo.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: ernie-bot-turbo -label: - en_US: Ernie Bot Turbo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 11200 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 1024 - min: 1 - max: 11200 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: response_format - use_template: response_format -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot.yaml deleted file mode 100644 index 13985b7483..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-bot.yaml +++ /dev/null @@ -1,39 +0,0 @@ -model: ernie-bot -label: - en_US: Ernie Bot -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4800 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.8 - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - required: true - default: 256 - min: 1 - max: 4800 - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: disable_search - label: - zh_Hans: 禁用搜索 - en_US: Disable Search - type: boolean - help: - zh_Hans: 禁用模型自行进行外部搜索。 - en_US: Disable the model to perform external search. 
- required: false - - name: response_format - use_template: response_format -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml deleted file mode 100644 index 74451ff9e3..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k-0321.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: ernie-character-8k-0321 -label: - en_US: ERNIE-Character-8K-0321 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 1024 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml deleted file mode 100644 index 4b11b3e895..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-character-8k.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: ernie-character-8k-0321 -label: - en_US: ERNIE-Character-8K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 1024 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml deleted file mode 100644 index 97ecb03f87..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0308.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: ernie-lite-8k-0308 -label: - en_US: ERNIE-Lite-8K-0308 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml deleted file mode 100644 index 7410ce51df..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-lite-8k-0922.yaml +++ /dev/null @@ -1,31 +0,0 @@ -model: ernie-lite-8k-0922 -label: - en_US: ERNIE-Lite-8K-0922 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 1024 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - 
max: 2.0 -deprecated: true diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-128k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-128k.yaml deleted file mode 100644 index 331639624c..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-128k.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: ernie-speed-128k -label: - en_US: ERNIE-Speed-128K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 128000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 4096 - min: 2 - max: 4096 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-8k.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-8k.yaml deleted file mode 100644 index 304c6d1f7e..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-8k.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: ernie-speed-8k -label: - en_US: ERNIE-Speed-8K -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 2 - max: 2048 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-appbuilder.yaml b/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-appbuilder.yaml deleted file mode 100644 index c254ae0260..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie-speed-appbuilder.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: ernie-speed-appbuilder -label: - en_US: ERNIE-Speed-AppBuilder -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py b/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py deleted file mode 100644 index 07b970f810..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/ernie_bot.py +++ /dev/null @@ -1,245 +0,0 @@ -from collections.abc import Generator -from enum import Enum -from json import dumps, loads -from typing import Any, Union - -from requests import Response, post - -from core.model_runtime.entities.message_entities import PromptMessageTool -from core.model_runtime.model_providers.wenxin._common import _CommonWenxin -from core.model_runtime.model_providers.wenxin.wenxin_errors import ( - BadRequestError, - InternalServerError, -) - - -class ErnieMessage: - class Role(Enum): - USER = "user" - ASSISTANT = "assistant" - FUNCTION = "function" - SYSTEM = "system" - - role: str = Role.USER.value - content: str - usage: dict[str, int] = None - stop_reason: str = "" - - def to_dict(self) -> dict[str, Any]: - return { - "role": self.role, - "content": self.content, - } 
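- # Note: usage and stop_reason keep their class-level defaults on outgoing
- # messages; they are only filled in on assistant replies parsed from responses.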
- - def __init__(self, content: str, role: str = "user") -> None: - self.content = content - self.role = role - - -class ErnieBotModel(_CommonWenxin): - def generate( - self, - model: str, - stream: bool, - messages: list[ErnieMessage], - parameters: dict[str, Any], - timeout: int, - tools: list[PromptMessageTool], - stop: list[str], - user: str, - ) -> Union[Generator[ErnieMessage, None, None], ErnieMessage]: - # check parameters - self._check_parameters(model, parameters, tools, stop) - - # get access token - access_token = self._get_access_token() - - # generate request body - url = f"{self.api_bases[model]}?access_token={access_token}" - - # clone messages - messages_cloned = self._copy_messages(messages=messages) - - # build body - body = self._build_request_body( - model, messages=messages_cloned, stream=stream, parameters=parameters, tools=tools, stop=stop, user=user - ) - headers = { - "Content-Type": "application/json", - } - - resp = post(url=url, data=dumps(body), headers=headers, stream=stream) - - if resp.status_code != 200: - raise InternalServerError(f"Failed to invoke ernie bot: {resp.text}") - - if stream: - return self._handle_chat_stream_generate_response(resp) - return self._handle_chat_generate_response(resp) - - def _copy_messages(self, messages: list[ErnieMessage]) -> list[ErnieMessage]: - return [ErnieMessage(message.content, message.role) for message in messages] - - def _check_parameters( - self, model: str, parameters: dict[str, Any], tools: list[PromptMessageTool], stop: list[str] - ) -> None: - if model not in self.api_bases: - raise BadRequestError(f"Invalid model: {model}") - - # if model not in self.function_calling_supports and tools is not None and len(tools) > 0: - # raise BadRequestError(f'Model {model} does not support calling function.') - # ErnieBot supports function calling, however, there are a lot of limitations. - # for example, the messages should alternate between user and assistant (or function) roles... - # so, we just disable function calling for now.
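# Annotation (not part of the deleted file): the alternation constraint described in the
# comment above is why tools are rejected outright below. A hypothetical helper expressing
# that constraint (names and logic are illustrative, not from the original source):
#
#   def _validate_message_order(messages: list[ErnieMessage]) -> None:
#       # non-function messages must alternate user/assistant, starting with user
#       for i, message in enumerate(m for m in messages if m.role != "function"):
#           expected = "user" if i % 2 == 0 else "assistant"
#           if message.role != expected:
#               raise BadRequestError("messages must alternate user/assistant roles")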
- - if tools is not None and len(tools) > 0: - raise BadRequestError("function calling is not supported yet.") - - if stop is not None: - if len(stop) > 4: - raise BadRequestError("stop list should not exceed 4 items.") - - for s in stop: - if len(s) > 20: - raise BadRequestError("stop item should not exceed 20 characters.") - - def _build_request_body( - self, - model: str, - messages: list[ErnieMessage], - stream: bool, - parameters: dict[str, Any], - tools: list[PromptMessageTool], - stop: list[str], - user: str, - ) -> dict[str, Any]: - # if model in self.function_calling_supports: - # return self._build_function_calling_request_body(model, messages, parameters, tools, stop, user) - return self._build_chat_request_body(model, messages, stream, parameters, stop, user) - - def _build_function_calling_request_body( - self, - model: str, - messages: list[ErnieMessage], - stream: bool, - parameters: dict[str, Any], - tools: list[PromptMessageTool], - stop: list[str], - user: str, - ) -> dict[str, Any]: - if len(messages) % 2 == 0: - raise BadRequestError("The number of messages should be odd.") - if messages[0].role == "function": - raise BadRequestError("The first message should be user message.") - - """ - TODO: implement function calling - """ - - def _build_chat_request_body( - self, - model: str, - messages: list[ErnieMessage], - stream: bool, - parameters: dict[str, Any], - stop: list[str], - user: str, - ) -> dict[str, Any]: - if len(messages) == 0: - raise BadRequestError("The number of messages should not be zero.") - - # check if the first element is system, shift it - system_message = "" - if messages[0].role == "system": - message = messages.pop(0) - system_message = message.content - - if len(messages) % 2 == 0: - raise BadRequestError("The number of messages should be odd.") - if messages[0].role != "user": - raise BadRequestError("The first message should be user message.") - body = { - "messages": [message.to_dict() for message in messages], - "stream": stream, - "stop": stop, - "user_id": user, - **parameters, - } - - if "max_tokens" in parameters and type(parameters["max_tokens"]) == int: - body["max_output_tokens"] = parameters["max_tokens"] - - if "presence_penalty" in parameters and type(parameters["presence_penalty"]) == float: - body["penalty_score"] = parameters["presence_penalty"] - - if system_message: - body["system"] = system_message - - return body - - def _handle_chat_generate_response(self, response: Response) -> ErnieMessage: - data = response.json() - if "error_code" in data: - code = data["error_code"] - msg = data["error_msg"] - # raise error - self._handle_error(code, msg) - - result = data["result"] - usage = data["usage"] - - message = ErnieMessage(content=result, role="assistant") - message.usage = { - "prompt_tokens": usage["prompt_tokens"], - "completion_tokens": usage["completion_tokens"], - "total_tokens": usage["total_tokens"], - } - - return message - - def _handle_chat_stream_generate_response(self, response: Response) -> Generator[ErnieMessage, None, None]: - for line in response.iter_lines(): - if len(line) == 0: - continue - line = line.decode("utf-8") - if line[0] == "{": - try: - data = loads(line) - if "error_code" in data: - code = data["error_code"] - msg = data["error_msg"] - # raise error - self._handle_error(code, msg) - except Exception as e: - raise InternalServerError(f"Failed to parse response: {e}") - - if line.startswith("data:"): - line = line[5:].strip() - else: - continue - - if not line: - continue - try: - data = 
loads(line) - except Exception as e: - raise InternalServerError(f"Failed to parse response: {e}") - - result = data["result"] - is_end = data["is_end"] - - if is_end: - usage = data["usage"] - finish_reason = data.get("finish_reason", None) - message = ErnieMessage(content=result, role="assistant") - message.usage = { - "prompt_tokens": usage["prompt_tokens"], - "completion_tokens": usage["completion_tokens"], - "total_tokens": usage["total_tokens"], - } - message.stop_reason = finish_reason - - yield message - else: - message = ErnieMessage(content=result, role="assistant") - yield message diff --git a/api/core/model_runtime/model_providers/wenxin/llm/llm.py b/api/core/model_runtime/model_providers/wenxin/llm/llm.py deleted file mode 100644 index f7c160b6b4..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/llm.py +++ /dev/null @@ -1,316 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union, cast - -from core.model_runtime.callbacks.base_callback import Callback -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.wenxin._common import BaiduAccessToken -from core.model_runtime.model_providers.wenxin.llm.ernie_bot import ErnieBotModel, ErnieMessage -from core.model_runtime.model_providers.wenxin.wenxin_errors import invoke_error_mapping - -ERNIE_BOT_BLOCK_MODE_PROMPT = """You should always follow the instructions and output a valid {{block}} object. -The structure of the {{block}} object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - - -{{instructions}} - - -You should also complete the text started with ``` but not tell ``` directly. 
-""" # noqa: E501 - - -class ErnieBotLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ) - - def _code_block_mode_wrapper( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - callbacks: list[Callback] = None, - ) -> Union[LLMResult, Generator]: - """ - Code block mode wrapper for invoking large language model - """ - if "response_format" in model_parameters and model_parameters["response_format"] in {"JSON", "XML"}: - response_format = model_parameters["response_format"] - stop = stop or [] - self._transform_json_prompts( - model, credentials, prompt_messages, model_parameters, tools, stop, stream, user, response_format - ) - model_parameters.pop("response_format") - if stream: - return self._code_block_mode_stream_processor( - model=model, - prompt_messages=prompt_messages, - input_generator=self._invoke( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - ), - ) - - return self._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _transform_json_prompts( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - response_format: str = "JSON", - ) -> None: - """ - Transform json prompts to model prompts - """ - - # check if there is a system message - if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - # override the system message - prompt_messages[0] = SystemPromptMessage( - content=ERNIE_BOT_BLOCK_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content).replace( - "{{block}}", response_format - ) - ) - else: - # insert the system message - prompt_messages.insert( - 0, - SystemPromptMessage( - content=ERNIE_BOT_BLOCK_MODE_PROMPT.replace( - "{{instructions}}", f"Please output a valid {response_format} object." 
- ).replace("{{block}}", response_format) - ), - ) - - if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage): - # add ```JSON\n to the last message - prompt_messages[-1].content += "\n```JSON\n{\n" - else: - # append a user message - prompt_messages.append(UserPromptMessage(content="```JSON\n{\n")) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - # tools is not supported yet - return self._num_tokens_from_messages(prompt_messages) - - def _num_tokens_from_messages( - self, - messages: list[PromptMessage], - ) -> int: - """Calculate num tokens for the Ernie Bot model""" - - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - tokens_per_message = 3 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - num_tokens += tokens(str(value)) - num_tokens += 3 - - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - api_key = credentials["api_key"] - secret_key = credentials["secret_key"] - try: - BaiduAccessToken.get_access_token(api_key, secret_key) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - instance = ErnieBotModel( - api_key=credentials["api_key"], - secret_key=credentials["secret_key"], - ) - - user = user or "ErnieBotDefault" - - # convert prompt messages to Ernie messages - messages = [ - ErnieMessage( - content=message.content - if isinstance(message.content, str) - else "".join([content.data for content in message.content]), - role=message.role.value, - ) - for message in prompt_messages - ] - - # invoke model - response = instance.generate( - model=model, - stream=stream, - messages=messages, - parameters=model_parameters, - timeout=60, - tools=tools, - stop=stop, - user=user, - ) - - if stream: - return self._handle_chat_generate_stream_response(model, prompt_messages, credentials, response) - else: - return self._handle_chat_generate_response(model, prompt_messages, credentials, response) - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for Ernie Bot - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - raise ValueError("User message content must be str") - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - else: - raise ValueError(f"Unknown message type {type(message)}") - - return message_dict - - def _handle_chat_generate_response( - self,
model: str, prompt_messages: list[PromptMessage], credentials: dict, response: ErnieMessage - ) -> LLMResult: - # convert Ernie message to llm result - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=response.usage["prompt_tokens"], - completion_tokens=response.usage["completion_tokens"], - ) - return LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage(content=response.content, tool_calls=[]), - usage=usage, - ) - - def _handle_chat_generate_stream_response( - self, - model: str, - prompt_messages: list[PromptMessage], - credentials: dict, - response: Generator[ErnieMessage, None, None], - ) -> Generator: - for message in response: - if message.usage: - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=message.usage["prompt_tokens"], - completion_tokens=message.usage["completion_tokens"], - ) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content, tool_calls=[]), - usage=usage, - finish_reason=message.stop_reason or None, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, - message=AssistantPromptMessage(content=message.content, tool_calls=[]), - finish_reason=message.stop_reason or None, - ), - ) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return invoke_error_mapping() diff --git a/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml b/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml deleted file mode 100644 index 0b247fbd22..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/llm/yi_34b_chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: yi_34b_chat -label: - en_US: yi_34b_chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32000 -parameter_rules: - - name: temperature - use_template: temperature - min: 0.1 - max: 1.0 - default: 0.95 - - name: top_p - use_template: top_p - min: 0 - max: 1.0 - default: 0.7 - - name: max_tokens - use_template: max_tokens - default: 4096 - min: 2 - max: 4096 - - name: presence_penalty - use_template: presence_penalty - default: 1.0 - min: 1.0 - max: 2.0 diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/__init__.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-en.yaml b/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-en.yaml deleted file mode 100644 index 74fadb7f9d..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-en.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: bge-large-en -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 16 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-zh.yaml b/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-zh.yaml deleted
file mode 100644 index d4af27ec38..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/bge-large-zh.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: bge-large-zh -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 16 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/embedding-v1.yaml b/api/core/model_runtime/model_providers/wenxin/text_embedding/embedding-v1.yaml deleted file mode 100644 index eda48d9655..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/embedding-v1.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: embedding-v1 -model_type: text-embedding -model_properties: - context_size: 384 - max_chunks: 16 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/tao-8k.yaml b/api/core/model_runtime/model_providers/wenxin/text_embedding/tao-8k.yaml deleted file mode 100644 index e28f253eb6..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/tao-8k.yaml +++ /dev/null @@ -1,9 +0,0 @@ -model: tao-8k -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 1 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/wenxin/wenxin.py b/api/core/model_runtime/model_providers/wenxin/wenxin.py deleted file mode 100644 index 895af20bc8..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/wenxin.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class WenxinProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validation fails, raise an exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use the `ernie-bot` model for validation. - model_instance.validate_credentials(model="ernie-bot", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/wenxin/wenxin.yaml b/api/core/model_runtime/model_providers/wenxin/wenxin.yaml deleted file mode 100644 index 6a6b38e6a1..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/wenxin.yaml +++ /dev/null @@ -1,40 +0,0 @@ -provider: wenxin -label: - en_US: WenXin - zh_Hans: 文心一言 -icon_small: - en_US: icon_s_en.png - zh_Hans: icon_s_en.png -icon_large: - en_US: icon_l_en.png - zh_Hans: icon_l_zh.png -background: "#E8F5FE" -help: - title: - en_US: Get your API Key from WenXin - zh_Hans: 从文心一言获取您的 API Key - url: - en_US: https://cloud.baidu.com/wenxin.html -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: secret_key - label: - en_US: Secret Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 Secret Key - en_US: Enter your Secret Key diff --git a/api/core/model_runtime/model_providers/wenxin/wenxin_errors.py b/api/core/model_runtime/model_providers/wenxin/wenxin_errors.py deleted file mode 100644 index bd074e0477..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/wenxin_errors.py +++ /dev/null @@ -1,54 +0,0 @@ -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -def invoke_error_mapping() -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller.
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } - - -class InvalidAuthenticationError(Exception): - pass - - -class InvalidAPIKeyError(Exception): - pass - - -class RateLimitReachedError(Exception): - pass - - -class InsufficientAccountBalanceError(Exception): - pass - - -class InternalServerError(Exception): - pass - - -class BadRequestError(Exception): - pass diff --git a/api/core/model_runtime/model_providers/xinference/__init__.py b/api/core/model_runtime/model_providers/xinference/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/xinference/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/xinference/_assets/icon_l_en.svg deleted file mode 100644 index 8109176543..0000000000 --- a/api/core/model_runtime/model_providers/xinference/_assets/icon_l_en.svg +++ /dev/null @@ -1,42 +0,0 @@ (SVG icon markup omitted) diff --git a/api/core/model_runtime/model_providers/xinference/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/xinference/_assets/icon_s_en.svg deleted file mode 100644 index f5c5f75ea8..0000000000 --- a/api/core/model_runtime/model_providers/xinference/_assets/icon_s_en.svg +++ /dev/null @@ -1,24 +0,0 @@ (SVG icon markup omitted) diff --git a/api/core/model_runtime/model_providers/xinference/llm/__init__.py b/api/core/model_runtime/model_providers/xinference/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py deleted file mode 100644 index 286640079b..0000000000 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ /dev/null @@ -1,816 +0,0 @@ -from collections.abc import Generator, Iterator -from typing import cast - -from openai import ( - APIConnectionError, - APITimeoutError, - AuthenticationError, - ConflictError, - InternalServerError, - NotFoundError, - OpenAI, - PermissionDeniedError, - RateLimitError, - UnprocessableEntityError, -) -from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessageToolCall -from openai.types.chat.chat_completion_chunk import ChoiceDeltaFunctionCall, ChoiceDeltaToolCall -from openai.types.chat.chat_completion_message import FunctionCall -from openai.types.completion import Completion -from xinference_client.client.restful.restful_client import ( - Client, - RESTfulChatModelHandle, - RESTfulGenerateModelHandle, -) - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContent, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - DefaultParameterName, - FetchFrom, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from
core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.xinference.xinference_helper import ( - XinferenceHelper, - XinferenceModelExtraParameter, -) -from core.model_runtime.utils import helper - - -class XinferenceAILargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - """ - invoke LLM - - see `core.model_runtime.model_providers.__base.large_language_model.LargeLanguageModel._invoke` - """ - if "temperature" in model_parameters: - if model_parameters["temperature"] < 0.01: - model_parameters["temperature"] = 0.01 - elif model_parameters["temperature"] > 1.0: - model_parameters["temperature"] = 0.99 - - return self._generate( - model=model, - credentials=credentials, - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=tools, - stop=stop, - stream=stream, - user=user, - extra_model_kwargs=XinferenceHelper.get_xinference_extra_parameter( - server_url=credentials["server_url"], - model_uid=credentials["model_uid"], - api_key=credentials.get("api_key"), - ), - ) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - validate credentials - - credentials should be like: - { - 'model_type': 'text-generation', - 'server_url': 'server url', - 'model_uid': 'model uid', - } - """ - try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: - raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - - extra_param = XinferenceHelper.get_xinference_extra_parameter( - server_url=credentials["server_url"], - model_uid=credentials["model_uid"], - api_key=credentials.get("api_key"), - ) - if "completion_type" not in credentials: - if "chat" in extra_param.model_ability: - credentials["completion_type"] = "chat" - elif "generate" in extra_param.model_ability: - credentials["completion_type"] = "completion" - else: - raise ValueError( - f"xinference model ability {extra_param.model_ability} is not supported," - f" check if you have the right model type" - ) - - if extra_param.support_function_call: - credentials["support_function_call"] = True - - if extra_param.support_vision: - credentials["support_vision"] = True - - if extra_param.context_length: - credentials["context_length"] = extra_param.context_length - - except RuntimeError as e: - raise CredentialsValidateFailedError(f"Xinference credentials validate failed: {e}") - except KeyError as e: - raise CredentialsValidateFailedError(f"Xinference credentials validate failed: {e}") - except Exception as e: - raise e - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool] | None = None, - ) -> int: - """ - get number of tokens - - cause XinferenceAI LLM is a customized model, we could net detect which tokenizer to use - so we just take the GPT2 tokenizer as default - """ - return self._num_tokens_from_messages(prompt_messages, tools) - - def _num_tokens_from_messages( - self, messages: list[PromptMessage], tools: list[PromptMessageTool], is_completion_model: bool = False - ) -> int: - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - if is_completion_model: - return sum(tokens(str(message.content)) for message in messages) - - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - if key == "function_call": - for t_key, t_value in value.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - else: - num_tokens += tokens(str(value)) - - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def _num_tokens_for_tools(self, tools: list[PromptMessageTool]) -> int: - """ - Calculate num tokens for tool calling - - :param encoding: encoding - :param tools: tools for tool calling - :return: number of tokens - """ - - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - num_tokens = 0 - for tool in tools: - # calculate num tokens for 
function object - num_tokens += tokens("name") - num_tokens += tokens(tool.name) - num_tokens += tokens("description") - num_tokens += tokens(tool.description) - parameters = tool.parameters - num_tokens += tokens("parameters") - num_tokens += tokens("type") - num_tokens += tokens(parameters.get("type")) - if "properties" in parameters: - num_tokens += tokens("properties") - for key, value in parameters.get("properties").items(): - num_tokens += tokens(key) - for field_key, field_value in value.items(): - num_tokens += tokens(field_key) - if field_key == "enum": - for enum_field in field_value: - num_tokens += 3 - num_tokens += tokens(enum_field) - else: - num_tokens += tokens(field_key) - num_tokens += tokens(str(field_value)) - if "required" in parameters: - num_tokens += tokens("required") - for required_field in parameters["required"]: - num_tokens += 3 - num_tokens += tokens(required_field) - - return num_tokens - - def _convert_prompt_message_to_text(self, message: list[PromptMessage]) -> str: - """ - convert prompt message to text - """ - text = "" - for item in message: - if isinstance(item, UserPromptMessage | SystemPromptMessage | AssistantPromptMessage): - text += item.content - else: - raise NotImplementedError(f"PromptMessage type {type(item)} is not supported") - return text - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for OpenAI Compatibility API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(PromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls and len(message.tool_calls) > 0: - message_dict["function_call"] = { - "name": message.tool_calls[0].function.name, - "arguments": message.tool_calls[0].function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = {"tool_call_id": message.tool_call_id, "role": "tool", "content": message.content} - else: - raise ValueError(f"Unknown message type {type(message)}") - - return message_dict - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - 
label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - max=credentials.get("context_length", 2048), - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ParameterRule( - name=DefaultParameterName.PRESENCE_PENALTY, - use_template=DefaultParameterName.PRESENCE_PENALTY, - type=ParameterType.FLOAT, - label=I18nObject( - en_US="Presence Penalty", - zh_Hans="存在惩罚", - ), - required=False, - help=I18nObject( - en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they " - "appear in the text so far, increasing the model's likelihood to talk about new topics.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚," - "从而增加模型谈论新话题的可能性。", - ), - default=0.0, - min=-2.0, - max=2.0, - precision=2, - ), - ParameterRule( - name=DefaultParameterName.FREQUENCY_PENALTY, - use_template=DefaultParameterName.FREQUENCY_PENALTY, - type=ParameterType.FLOAT, - label=I18nObject( - en_US="Frequency Penalty", - zh_Hans="频率惩罚", - ), - required=False, - help=I18nObject( - en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on their " - "existing frequency in the text so far, decreasing the model's likelihood to repeat the " - "same line verbatim.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚," - "从而降低模型逐字重复相同内容的可能性。", - ), - default=0.0, - min=-2.0, - max=2.0, - precision=2, - ), - ] - - completion_type = None - - if "completion_type" in credentials: - if credentials["completion_type"] == "chat": - completion_type = LLMMode.CHAT.value - elif credentials["completion_type"] == "completion": - completion_type = LLMMode.COMPLETION.value - else: - raise ValueError(f'completion_type {credentials["completion_type"]} is not supported') - else: - extra_args = XinferenceHelper.get_xinference_extra_parameter( - server_url=credentials["server_url"], - model_uid=credentials["model_uid"], - api_key=credentials.get("api_key"), - ) - - if "chat" in extra_args.model_ability: - completion_type = LLMMode.CHAT.value - elif "generate" in extra_args.model_ability: - completion_type = LLMMode.COMPLETION.value - else: - raise ValueError(f"xinference model ability {extra_args.model_ability} is not supported") - - features = [] - - support_function_call = credentials.get("support_function_call", False) - if support_function_call: - features.append(ModelFeature.TOOL_CALL) - - support_vision = credentials.get("support_vision", False) - if support_vision: - features.append(ModelFeature.VISION) - - context_length = credentials.get("context_length", 2048) - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - features=features, - model_properties={ModelPropertyKey.MODE: completion_type, ModelPropertyKey.CONTEXT_SIZE: context_length}, - parameter_rules=rules, - ) - - return entity - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - extra_model_kwargs: XinferenceModelExtraParameter, - tools: list[PromptMessageTool] | None = None, - stop: list[str] | None = None, - stream: bool = True, - user: str | None = None, - ) -> LLMResult | Generator: - """ - generate text from LLM - - see `core.model_runtime.model_providers.__base.large_language_model.LargeLanguageModel._generate` - - extra_model_kwargs can be got by `XinferenceHelper.get_xinference_extra_parameter` - """ - if "server_url" not 
in credentials: - raise CredentialsValidateFailedError("server_url is required in credentials") - - credentials["server_url"] = credentials["server_url"].removesuffix("/") - - api_key = credentials.get("api_key") or "abc" - - client = OpenAI( - base_url=f'{credentials["server_url"]}/v1', - api_key=api_key, - max_retries=3, - timeout=60, - ) - - xinference_client = Client( - base_url=credentials["server_url"], - api_key=credentials.get("api_key"), - ) - - xinference_model = xinference_client.get_model(credentials["model_uid"]) - - generate_config = { - "temperature": model_parameters.get("temperature", 1.0), - "top_p": model_parameters.get("top_p", 0.7), - "max_tokens": model_parameters.get("max_tokens", 512), - "presence_penalty": model_parameters.get("presence_penalty", 0.0), - "frequency_penalty": model_parameters.get("frequency_penalty", 0.0), - } - - if stop: - generate_config["stop"] = stop - - if tools and len(tools) > 0: - generate_config["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools] - vision = credentials.get("support_vision", False) - if isinstance(xinference_model, RESTfulChatModelHandle): - resp = client.chat.completions.create( - model=credentials["model_uid"], - messages=[self._convert_prompt_message_to_dict(message) for message in prompt_messages], - stream=stream, - user=user, - **generate_config, - ) - if stream: - if tools and len(tools) > 0: - raise InvokeBadRequestError("xinference tool calls does not support stream mode") - return self._handle_chat_stream_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=resp - ) - return self._handle_chat_generate_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=resp - ) - elif isinstance(xinference_model, RESTfulGenerateModelHandle): - resp = client.completions.create( - model=credentials["model_uid"], - prompt=self._convert_prompt_message_to_text(prompt_messages), - stream=stream, - user=user, - **generate_config, - ) - if stream: - return self._handle_completion_stream_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=resp - ) - return self._handle_completion_generate_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=resp - ) - else: - raise NotImplementedError(f"xinference model handle type {type(xinference_model)} is not supported") - - def _extract_response_tool_calls( - self, response_tool_calls: list[ChatCompletionMessageToolCall | ChoiceDeltaToolCall] - ) -> list[AssistantPromptMessage.ToolCall]: - """ - Extract tool calls from response - - :param response_tool_calls: response tool calls - :return: list of tool calls - """ - tool_calls = [] - if response_tool_calls: - for response_tool_call in response_tool_calls: - function = AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_tool_call.function.name, arguments=response_tool_call.function.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_tool_call.id, type=response_tool_call.type, function=function - ) - tool_calls.append(tool_call) - - return tool_calls - - def _extract_response_function_call( - self, response_function_call: FunctionCall | ChoiceDeltaFunctionCall - ) -> AssistantPromptMessage.ToolCall: - """ - Extract function call from response - - :param response_function_call: response function call - :return: tool call - """ - tool_call = None - if response_function_call: - function = 
AssistantPromptMessage.ToolCall.ToolCallFunction( - name=response_function_call.name, arguments=response_function_call.arguments - ) - - tool_call = AssistantPromptMessage.ToolCall( - id=response_function_call.name, type="function", function=function - ) - - return tool_call - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: ChatCompletion, - ) -> LLMResult: - """ - handle normal chat generate response - """ - if len(resp.choices) == 0: - raise InvokeServerUnavailableError("Empty response") - - assistant_message = resp.choices[0].message - - # convert tool call to assistant message tool call - tool_calls = assistant_message.tool_calls - assistant_prompt_message_tool_calls = self._extract_response_tool_calls(tool_calls or []) - function_call = assistant_message.function_call - if function_call: - assistant_prompt_message_tool_calls += [self._extract_response_function_call(function_call)] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=assistant_message.content, tool_calls=assistant_prompt_message_tool_calls - ) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[assistant_prompt_message], tools=tools) - - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=resp.system_fingerprint, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _handle_chat_stream_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: Iterator[ChatCompletionChunk], - ) -> Generator: - """ - handle stream chat generate response - """ - full_response = "" - - for chunk in resp: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""): - continue - - # check if there is a tool call in the response - function_call = None - tool_calls = [] - if delta.delta.tool_calls: - tool_calls += delta.delta.tool_calls - if delta.delta.function_call: - function_call = delta.delta.function_call - - assistant_message_tool_calls = self._extract_response_tool_calls(tool_calls) - if function_call: - assistant_message_tool_calls += [self._extract_response_function_call(function_call)] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta.delta.content or "", tool_calls=assistant_message_tool_calls - ) - - if delta.finish_reason is not None: - # temp_assistant_prompt_message is used to calculate usage - temp_assistant_prompt_message = AssistantPromptMessage( - content=full_response, tool_calls=assistant_message_tool_calls - ) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[temp_assistant_prompt_message], tools=[]) - - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - 
delta=LLMResultChunkDelta( - index=0, message=assistant_prompt_message, finish_reason=delta.finish_reason, usage=usage - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, - message=assistant_prompt_message, - ), - ) - - full_response += delta.delta.content - - def _handle_completion_generate_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: Completion, - ) -> LLMResult: - """ - handle normal completion generate response - """ - if len(resp.choices) == 0: - raise InvokeServerUnavailableError("Empty response") - - assistant_message = resp.choices[0].text - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=assistant_message, tool_calls=[]) - - prompt_tokens = self._get_num_tokens_by_gpt2(self._convert_prompt_message_to_text(prompt_messages)) - completion_tokens = self._num_tokens_from_messages( - messages=[assistant_prompt_message], tools=[], is_completion_model=True - ) - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=resp.system_fingerprint, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _handle_completion_stream_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: Iterator[Completion], - ) -> Generator: - """ - handle stream completion generate response - """ - full_response = "" - - for chunk in resp: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=delta.text or "", tool_calls=[]) - - if delta.finish_reason is not None: - # temp_assistant_prompt_message is used to calculate usage - temp_assistant_prompt_message = AssistantPromptMessage(content=full_response, tool_calls=[]) - - prompt_tokens = self._get_num_tokens_by_gpt2(self._convert_prompt_message_to_text(prompt_messages)) - completion_tokens = self._num_tokens_from_messages( - messages=[temp_assistant_prompt_message], tools=[], is_completion_model=True - ) - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, message=assistant_prompt_message, finish_reason=delta.finish_reason, usage=usage - ), - ) - else: - if delta.text is None or delta.text == "": - continue - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=chunk.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, - message=assistant_prompt_message, - ), - ) - - full_response += delta.text - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [ - APIConnectionError, - APITimeoutError, - ], - InvokeServerUnavailableError: [ - InternalServerError, - ConflictError, - NotFoundError, - UnprocessableEntityError, - PermissionDeniedError, - ], - InvokeRateLimitError: [RateLimitError], - InvokeAuthorizationError: [AuthenticationError], - InvokeBadRequestError: [ValueError], - } diff --git a/api/core/model_runtime/model_providers/xinference/rerank/__init__.py b/api/core/model_runtime/model_providers/xinference/rerank/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py b/api/core/model_runtime/model_providers/xinference/rerank/rerank.py deleted file mode 100644 index 8f18bc42d2..0000000000 --- a/api/core/model_runtime/model_providers/xinference/rerank/rerank.py +++ /dev/null @@ -1,189 +0,0 @@ -from typing import Optional - -from xinference_client.client.restful.restful_client import Client, RESTfulRerankModelHandle - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.rerank_model import RerankModel - - -class XinferenceRerankModel(RerankModel): - """ - Model class for Xinference rerank model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n - :param user: unique user id - :return: rerank result - """ - if len(docs) == 0: - return RerankResult(model=model, docs=[]) - - server_url = credentials["server_url"] - model_uid = credentials["model_uid"] - api_key = credentials.get("api_key") - server_url = server_url.removesuffix("/") - auth_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - - params = {"documents": docs, "query": query, "top_n": top_n, "return_documents": True} - try: - handle = RESTfulRerankModelHandle(model_uid, server_url, auth_headers) - response = handle.rerank(**params) - except RuntimeError as e: - if "rerank hasn't support extra parameter" not in str(e): - raise InvokeServerUnavailableError(str(e)) - - # compatible with xinference servers between v0.10.1 and v0.12.1, which do not support 'return_len' - handle = RESTfulRerankModelHandleWithoutExtraParameter(model_uid, server_url, auth_headers) - response = handle.rerank(**params) - - rerank_documents = [] - for idx, result in enumerate(response["results"]): - # format document - index = result["index"] - page_content = result["document"] if isinstance(result["document"], str) else result["document"]["text"] - rerank_document = RerankDocument( - index=index, - text=page_content, - score=result["relevance_score"], - ) - - # score threshold check - if score_threshold is not None: - if
result["relevance_score"] >= score_threshold: - rerank_documents.append(rerank_document) - else: - rerank_documents.append(rerank_document) - - return RerankResult(model=model, docs=rerank_documents) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - if "/" in credentials["model_uid"] or "?" in credentials["model_uid"] or "#" in credentials["model_uid"]: - raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - - credentials["server_url"] = credentials["server_url"].removesuffix("/") - - # initialize client - client = Client( - base_url=credentials["server_url"], - api_key=credentials.get("api_key"), - ) - - xinference_client = client.get_model(model_uid=credentials["model_uid"]) - - if not isinstance(xinference_client, RESTfulRerankModelHandle): - raise InvokeBadRequestError( - "please check model type, the model you want to invoke is not a rerank model" - ) - - self.invoke( - model=model, - credentials=credentials, - query="Whose kasumi", - docs=[ - 'Kasumi is a girl\'s name of Japanese origin meaning "mist".', - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ", - "and she leads a team named PopiParty.", - ], - score_threshold=0.8, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.RERANK, - model_properties={}, - parameter_rules=[], - ) - - return entity - - -class RESTfulRerankModelHandleWithoutExtraParameter(RESTfulRerankModelHandle): - def rerank( - self, - documents: list[str], - query: str, - top_n: Optional[int] = None, - max_chunks_per_doc: Optional[int] = None, - return_documents: Optional[bool] = None, - **kwargs, - ): - url = f"{self._base_url}/v1/rerank" - request_body = { - "model": self._model_uid, - "documents": documents, - "query": query, - "top_n": top_n, - "max_chunks_per_doc": max_chunks_per_doc, - "return_documents": return_documents, - } - - import requests - - response = requests.post(url, json=request_body, headers=self.auth_headers) - if response.status_code != 200: - raise InvokeServerUnavailableError(f"Failed to rerank documents, detail: {response.json()['detail']}") - response_data = response.json() - return response_data diff --git a/api/core/model_runtime/model_providers/xinference/speech2text/__init__.py b/api/core/model_runtime/model_providers/xinference/speech2text/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py deleted file mode 100644 index a6c5b8a0a5..0000000000 --- a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py +++ /dev/null @@ -1,144 +0,0 @@ -from typing import IO, Optional - -from xinference_client.client.restful.restful_client import Client, RESTfulAudioModelHandle - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel - - -class XinferenceSpeech2TextModel(Speech2TextModel): - """ - Model class for Xinference speech to text model. - """ - - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - return self._speech2text_invoke(model, credentials, file) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - if "/" in credentials["model_uid"] or "?" in credentials["model_uid"] or "#" in credentials["model_uid"]: - raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - - credentials["server_url"] = credentials["server_url"].removesuffix("/") - - # initialize client - client = Client( - base_url=credentials["server_url"], - api_key=credentials.get("api_key"), - ) - - xinference_client = client.get_model(model_uid=credentials["model_uid"]) - - if not isinstance(xinference_client, RESTfulAudioModelHandle): - raise InvokeBadRequestError( - "please check model type, the model you want to invoke is not an audio model" - ) - - audio_file_path = self._get_demo_file_path() - - with open(audio_file_path, "rb") as audio_file: - self.invoke(model, credentials, audio_file) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller.
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def _speech2text_invoke( - self, - model: str, - credentials: dict, - file: IO[bytes], - language: Optional[str] = None, - prompt: Optional[str] = None, - response_format: Optional[str] = "json", - temperature: Optional[float] = 0, - ) -> str: - """ - Invoke speech2text model - - :param model: model name - :param credentials: model credentials - :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, - mpga, m4a, ogg, wav, or webm. - :param language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve - accuracy and latency. - :param prompt: An optional text to guide the model's style or continue a previous audio segment. - The prompt should match the audio language. - :param response_format: The format of the transcript output, in one of these options: json, text, srt, - verbose_json, or vtt. - :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more - random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use - log probability to automatically increase the temperature until certain thresholds are hit. - :return: text for given audio file - """ - server_url = credentials["server_url"] - model_uid = credentials["model_uid"] - api_key = credentials.get("api_key") - server_url = server_url.removesuffix("/") - auth_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - - try: - handle = RESTfulAudioModelHandle(model_uid, server_url, auth_headers) - response = handle.transcriptions( - audio=file, language=language, prompt=prompt, response_format=response_format, temperature=temperature - ) - except RuntimeError as e: - raise InvokeServerUnavailableError(str(e)) - - return response["text"] - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.SPEECH2TEXT, - model_properties={}, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/xinference/text_embedding/__init__.py b/api/core/model_runtime/model_providers/xinference/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/xinference/tts/__init__.py b/api/core/model_runtime/model_providers/xinference/tts/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/xinference/tts/tts.py b/api/core/model_runtime/model_providers/xinference/tts/tts.py deleted file mode 100644 index 81dbe397d2..0000000000 --- a/api/core/model_runtime/model_providers/xinference/tts/tts.py +++ /dev/null @@ -1,228 +0,0 @@ -import concurrent.futures -from typing import Optional - -from xinference_client.client.restful.restful_client import RESTfulAudioModelHandle - -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType -from
core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.tts_model import TTSModel -from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper - - -class XinferenceText2SpeechModel(TTSModel): - def __init__(self): - # preset voices, need support custom voice - self.model_voices = { - "__default": { - "all": [ - {"name": "Default", "value": "default"}, - ] - }, - "ChatTTS": { - "all": [ - {"name": "Alloy", "value": "alloy"}, - {"name": "Echo", "value": "echo"}, - {"name": "Fable", "value": "fable"}, - {"name": "Onyx", "value": "onyx"}, - {"name": "Nova", "value": "nova"}, - {"name": "Shimmer", "value": "shimmer"}, - ] - }, - "CosyVoice": { - "zh-Hans": [ - {"name": "中文男", "value": "中文男"}, - {"name": "中文女", "value": "中文女"}, - {"name": "粤语女", "value": "粤语女"}, - ], - "zh-Hant": [ - {"name": "中文男", "value": "中文男"}, - {"name": "中文女", "value": "中文女"}, - {"name": "粤语女", "value": "粤语女"}, - ], - "en-US": [ - {"name": "英文男", "value": "英文男"}, - {"name": "英文女", "value": "英文女"}, - ], - "ja-JP": [ - {"name": "日语男", "value": "日语男"}, - ], - "ko-KR": [ - {"name": "韩语女", "value": "韩语女"}, - ], - }, - } - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - if "/" in credentials["model_uid"] or "?" in credentials["model_uid"] or "#" in credentials["model_uid"]: - raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - - credentials["server_url"] = credentials["server_url"].removesuffix("/") - - extra_param = XinferenceHelper.get_xinference_extra_parameter( - server_url=credentials["server_url"], - model_uid=credentials["model_uid"], - api_key=credentials.get("api_key"), - ) - - if "text-to-audio" not in extra_param.model_ability: - raise InvokeBadRequestError( - "please check model type, the model you want to invoke is not a text-to-audio model" - ) - - if extra_param.model_family and extra_param.model_family in self.model_voices: - credentials["audio_model_name"] = extra_param.model_family - else: - credentials["audio_model_name"] = "__default" - - self._tts_invoke_streaming( - model=model, - credentials=credentials, - content_text="Hello Dify!", - voice=self._get_model_default_voice(model, credentials), - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _invoke( - self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None - ): - """ - _invoke text2speech model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param voice: model timbre - :param content_text: text content to be translated - :param user: unique user id - :return: text translated to audio file - """ - return self._tts_invoke_streaming(model, credentials, content_text, voice) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TTS, - model_properties={}, - parameter_rules=[], - ) - - return entity - 
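(Editor's note: the preset voice table deleted above feeds the get_tts_model_voices lookup that follows in this hunk. Below is a minimal, standalone sketch of that fallback logic, assuming only the table shape shown here; MODEL_VOICES and get_voices are illustrative names, not part of the removed code.)

    from typing import Optional

    # Shape mirrors the deleted model_voices table:
    # {model_family: {language_or_"all": [{"name": ..., "value": ...}, ...]}}
    MODEL_VOICES = {
        "__default": {"all": [{"name": "Default", "value": "default"}]},
        "CosyVoice": {
            "zh-Hans": [{"name": "中文女", "value": "中文女"}],
            "en-US": [{"name": "英文女", "value": "英文女"}],
        },
    }

    def get_voices(audio_model_name: str, language: Optional[str] = None) -> list:
        for family, voices in MODEL_VOICES.items():
            # Substring match, so e.g. "CosyVoice-300M" still hits the "CosyVoice" presets.
            if family in audio_model_name:
                if language and language in voices:
                    return voices[language]
                if "all" in voices:
                    return voices["all"]
                # No language match and no "all" bucket: flatten every language's voices.
                return [v for lang_voices in voices.values() for v in lang_voices]
        return MODEL_VOICES["__default"]["all"]

    # e.g. get_voices("CosyVoice-300M", "en-US") -> [{"name": "英文女", "value": "英文女"}]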
- @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list: - audio_model_name = credentials.get("audio_model_name", "__default") - for key, voices in self.model_voices.items(): - if key in audio_model_name: - if language and language in voices: - return voices[language] - elif "all" in voices: - return voices["all"] - else: - all_voices = [] - for lang, lang_voices in voices.items(): - all_voices.extend(lang_voices) - return all_voices - - return self.model_voices["__default"]["all"] - - def _get_model_default_voice(self, model: str, credentials: dict) -> any: - return "" - - def _get_model_word_limit(self, model: str, credentials: dict) -> int: - return 3500 - - def _get_model_audio_type(self, model: str, credentials: dict) -> str: - return "mp3" - - def _get_model_workers_limit(self, model: str, credentials: dict) -> int: - return 5 - - def _tts_invoke_streaming(self, model: str, credentials: dict, content_text: str, voice: str) -> any: - """ - _tts_invoke_streaming text2speech model - - :param model: model name - :param credentials: model credentials - :param content_text: text content to be translated - :param voice: model timbre - :return: text translated to audio file - """ - credentials["server_url"] = credentials["server_url"].removesuffix("/") - - try: - api_key = credentials.get("api_key") - auth_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - handle = RESTfulAudioModelHandle( - credentials["model_uid"], credentials["server_url"], auth_headers=auth_headers - ) - - model_support_voice = [ - x.get("value") for x in self.get_tts_model_voices(model=model, credentials=credentials) - ] - if not voice or voice not in model_support_voice: - voice = self._get_model_default_voice(model, credentials) - word_limit = self._get_model_word_limit(model, credentials) - if len(content_text) > word_limit: - sentences = self._split_text_into_sentences(content_text, max_length=word_limit) - executor = concurrent.futures.ThreadPoolExecutor(max_workers=min(3, len(sentences))) - futures = [ - executor.submit( - handle.speech, input=sentences[i], voice=voice, response_format="mp3", speed=1.0, stream=True - ) - for i in range(len(sentences)) - ] - - for future in futures: - response = future.result() - for chunk in response: - yield chunk - else: - response = handle.speech( - input=content_text.strip(), voice=voice, response_format="mp3", speed=1.0, stream=True - ) - - for chunk in response: - yield chunk - except Exception as ex: - raise InvokeBadRequestError(str(ex)) diff --git a/api/core/model_runtime/model_providers/xinference/xinference.py b/api/core/model_runtime/model_providers/xinference/xinference.py deleted file mode 100644 index d85f7c82e7..0000000000 --- a/api/core/model_runtime/model_providers/xinference/xinference.py +++ /dev/null @@ -1,10 +0,0 @@ -import logging - -from 
core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class XinferenceAIProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - pass diff --git a/api/core/model_runtime/model_providers/xinference/xinference.yaml b/api/core/model_runtime/model_providers/xinference/xinference.yaml deleted file mode 100644 index be9073c1ca..0000000000 --- a/api/core/model_runtime/model_providers/xinference/xinference.yaml +++ /dev/null @@ -1,58 +0,0 @@ -provider: xinference -label: - en_US: Xorbits Inference -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FAF5FF" -help: - title: - en_US: How to deploy Xinference - zh_Hans: 如何部署 Xinference - url: - en_US: https://github.com/xorbitsai/inference -supported_model_types: - - llm - - text-embedding - - rerank - - speech2text - - tts -configurate_methods: - - customizable-model -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: server_url - label: - zh_Hans: 服务器URL - en_US: Server url - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入Xinference的服务器地址,如 http://192.168.1.100:9997 - en_US: Enter the url of your Xinference, e.g. http://192.168.1.100:9997 - - variable: model_uid - label: - zh_Hans: 模型UID - en_US: Model uid - type: text-input - required: true - placeholder: - zh_Hans: 在此输入您的Model UID - en_US: Enter the model uid - - variable: api_key - label: - zh_Hans: API密钥 - en_US: API key - type: secret-input - required: false - placeholder: - zh_Hans: 在此输入您的API密钥 - en_US: Enter the api key diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py deleted file mode 100644 index 619ee1492a..0000000000 --- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py +++ /dev/null @@ -1,134 +0,0 @@ -from threading import Lock -from time import time -from typing import Optional - -from requests.adapters import HTTPAdapter -from requests.exceptions import ConnectionError, MissingSchema, Timeout -from requests.sessions import Session -from yarl import URL - - -class XinferenceModelExtraParameter: - model_format: str - model_handle_type: str - model_ability: list[str] - max_tokens: int = 512 - context_length: int = 2048 - support_function_call: bool = False - support_vision: bool = False - model_family: Optional[str] - - def __init__( - self, - model_format: str, - model_handle_type: str, - model_ability: list[str], - support_function_call: bool, - support_vision: bool, - max_tokens: int, - context_length: int, - model_family: Optional[str], - ) -> None: - self.model_format = model_format - self.model_handle_type = model_handle_type - self.model_ability = model_ability - self.support_function_call = support_function_call - self.support_vision = support_vision - self.max_tokens = max_tokens - self.context_length = context_length - self.model_family = model_family - - -cache = {} -cache_lock = Lock() - - -class XinferenceHelper: - @staticmethod - def get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: str) -> XinferenceModelExtraParameter: - XinferenceHelper._clean_cache() - with cache_lock: - if model_uid not in cache: - cache[model_uid] = { - "expires": time() + 300, - "value": XinferenceHelper._get_xinference_extra_parameter(server_url, model_uid, 
api_key), - } - return cache[model_uid]["value"] - - @staticmethod - def _clean_cache() -> None: - try: - with cache_lock: - expired_keys = [model_uid for model_uid, model in cache.items() if model["expires"] < time()] - for model_uid in expired_keys: - del cache[model_uid] - except RuntimeError: - pass - - @staticmethod - def _get_xinference_extra_parameter(server_url: str, model_uid: str, api_key: str) -> XinferenceModelExtraParameter: - """ - get xinference model extra parameter like model_format and model_handle_type - """ - - if not model_uid or not model_uid.strip() or not server_url or not server_url.strip(): - raise RuntimeError("model_uid or server_url is empty") - - url = str(URL(server_url) / "v1" / "models" / model_uid) - - # this method is surrounded by a lock, and default requests may hang forever, - # so we just set an Adapter with max_retries=3 - session = Session() - session.mount("http://", HTTPAdapter(max_retries=3)) - session.mount("https://", HTTPAdapter(max_retries=3)) - headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - - try: - response = session.get(url, headers=headers, timeout=10) - except (MissingSchema, ConnectionError, Timeout) as e: - raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}") - if response.status_code != 200: - raise RuntimeError( - f"get xinference model extra parameter failed, status code: {response.status_code}," - f" response: {response.text}" - ) - - response_json = response.json() - - model_format = response_json.get("model_format", "ggmlv3") - model_ability = response_json.get("model_ability", []) - model_family = response_json.get("model_family", None) - - if response_json.get("model_type") == "embedding": - model_handle_type = "embedding" - elif response_json.get("model_type") == "audio": - model_handle_type = "audio" - if model_family and model_family in {"ChatTTS", "CosyVoice", "FishAudio"}: - model_ability.append("text-to-audio") - else: - model_ability.append("audio-to-text") - elif model_format == "ggmlv3" and "chatglm" in response_json["model_name"]: - model_handle_type = "chatglm" - elif "generate" in model_ability: - model_handle_type = "generate" - elif "chat" in model_ability: - model_handle_type = "chat" - else: - raise NotImplementedError("xinference model handle type is not supported") - - support_function_call = "tools" in model_ability - support_vision = "vision" in model_ability - max_tokens = response_json.get("max_tokens", 512) - - context_length = response_json.get("context_length", 2048) - - return XinferenceModelExtraParameter( - model_format=model_format, - model_handle_type=model_handle_type, - model_ability=model_ability, - support_function_call=support_function_call, - support_vision=support_vision, - max_tokens=max_tokens, - context_length=context_length, - model_family=model_family, - ) diff --git a/api/core/model_runtime/model_providers/yi/__init__.py b/api/core/model_runtime/model_providers/yi/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg deleted file mode 100644 index 9ce3baddaa..0000000000 --- a/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg deleted file mode
100644 index eb0395a21c..0000000000 --- a/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/yi/llm/__init__.py b/api/core/model_runtime/model_providers/yi/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/yi/llm/_position.yaml b/api/core/model_runtime/model_providers/yi/llm/_position.yaml deleted file mode 100644 index e876893b41..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/_position.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- yi-34b-chat-0205 -- yi-34b-chat-200k -- yi-vl-plus -- yi-large -- yi-medium -- yi-vision -- yi-medium-200k -- yi-spark -- yi-large-turbo diff --git a/api/core/model_runtime/model_providers/yi/llm/llm.py b/api/core/model_runtime/model_providers/yi/llm/llm.py deleted file mode 100644 index 5ab7fd126e..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/llm.py +++ /dev/null @@ -1,127 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union -from urllib.parse import urlparse - -import tiktoken - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import ( - PromptMessage, - PromptMessageTool, - SystemPromptMessage, -) -from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel - - -class YiLargeLanguageModel(OpenAILargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - - # yi-vl-plus does not support system prompts yet. - if model == "yi-vl-plus": - prompt_message_except_system: list[PromptMessage] = [] - for message in prompt_messages: - if not isinstance(message, SystemPromptMessage): - prompt_message_except_system.append(message) - return super()._invoke( - model, credentials, prompt_message_except_system, model_parameters, tools, stop, stream - ) - - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - # refactored from the openai model runtime; uses cl100k_base to calculate token counts - def _num_tokens_from_string(self, model: str, text: str, tools: Optional[list[PromptMessageTool]] = None) -> int: - """ - Calculate num tokens for text completion model with tiktoken package. - - :param model: model name - :param text: prompt text - :param tools: tools for tool calling - :return: number of tokens - """ - encoding = tiktoken.get_encoding("cl100k_base") - num_tokens = len(encoding.encode(text)) - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - # refactored from the openai model runtime; uses cl100k_base to calculate token counts - def _num_tokens_from_messages( - self, model: str, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> int: - """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
- - Official documentation: https://github.com/openai/openai-cookbook/blob/ - main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" - encoding = tiktoken.get_encoding("cl100k_base") - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - # Cast str(value) in case the message value is not a string - # This occurs with function messages - # TODO: The current token calculation method for the image type is not implemented, - # which need to download the image and then get the resolution for calculation, - # and will increase the request delay - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += len(encoding.encode(t_key)) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += len(encoding.encode(f_key)) - num_tokens += len(encoding.encode(f_value)) - else: - num_tokens += len(encoding.encode(t_key)) - num_tokens += len(encoding.encode(t_value)) - else: - num_tokens += len(encoding.encode(str(value))) - - if key == "name": - num_tokens += tokens_per_name - - # every reply is primed with assistant - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(encoding, tools) - - return num_tokens - - @staticmethod - def _add_custom_parameters(credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["openai_api_key"] = credentials["api_key"] - if "endpoint_url" not in credentials or credentials["endpoint_url"] == "": - credentials["openai_api_base"] = "https://api.lingyiwanwu.com" - else: - parsed_url = urlparse(credentials["endpoint_url"]) - credentials["openai_api_base"] = f"{parsed_url.scheme}://{parsed_url.netloc}" diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml deleted file mode 100644 index ea3d8f5dce..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-34b-chat-0205 -label: - zh_Hans: yi-34b-chat-0205 - en_US: yi-34b-chat-0205 -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4000 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. 
Generally speaking, you can adjust one of the two parameters top_p and temperature. -pricing: - input: '2.5' - output: '2.5' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml deleted file mode 100644 index d91f984d7f..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-34b-chat-200k -label: - zh_Hans: yi-34b-chat-200k - en_US: yi-34b-chat-200k -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 200000 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.6 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 4096 - min: 1 - max: 199950 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. -pricing: - input: '12' - output: '12' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml deleted file mode 100644 index 1d00eca2ca..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-large-turbo.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-large-turbo -label: - zh_Hans: yi-large-turbo - en_US: yi-large-turbo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 16384 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
-pricing: - input: '12' - output: '12' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml deleted file mode 100644 index 347f511280..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-large.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-large -label: - zh_Hans: yi-large - en_US: yi-large -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 16384 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. -pricing: - input: '20' - output: '20' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml deleted file mode 100644 index e8ddbcba97..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-medium-200k.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-medium-200k -label: - zh_Hans: yi-medium-200k - en_US: yi-medium-200k -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 204800 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 204800 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
-pricing: - input: '12' - output: '12' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml deleted file mode 100644 index 4f0244d1f5..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-medium.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-medium -label: - zh_Hans: yi-medium - en_US: yi-medium -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 16384 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. -pricing: - input: '2.5' - output: '2.5' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml deleted file mode 100644 index e28e9fd8c0..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-spark.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-spark -label: - zh_Hans: yi-spark - en_US: yi-spark -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 16384 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 16384 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
-pricing: - input: '1' - output: '1' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml deleted file mode 100644 index bce34f5836..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-vision.yaml +++ /dev/null @@ -1,44 +0,0 @@ -model: yi-vision -label: - zh_Hans: yi-vision - en_US: yi-vision -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.9 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. -pricing: - input: '6' - output: '6' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml deleted file mode 100644 index 461c68583f..0000000000 --- a/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml +++ /dev/null @@ -1,43 +0,0 @@ -model: yi-vl-plus -label: - zh_Hans: yi-vl-plus - en_US: yi-vl-plus -model_type: llm -features: - - vision -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 - en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4000 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.01 - max: 1.00 - help: - zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 - en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
-pricing: - input: '6' - output: '6' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/yi/yi.py b/api/core/model_runtime/model_providers/yi/yi.py deleted file mode 100644 index 9599acb22b..0000000000 --- a/api/core/model_runtime/model_providers/yi/yi.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class YiProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `yi-34b-chat-0205` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="yi-34b-chat-0205", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/yi/yi.yaml b/api/core/model_runtime/model_providers/yi/yi.yaml deleted file mode 100644 index de741afb10..0000000000 --- a/api/core/model_runtime/model_providers/yi/yi.yaml +++ /dev/null @@ -1,41 +0,0 @@ -provider: yi -label: - en_US: 01.AI - zh_Hans: 零一万物 -description: - en_US: Models provided by 01.AI, such as yi-34b-chat and yi-vl-plus. - zh_Hans: 零一万物提供的模型,例如 yi-34b-chat 和 yi-vl-plus。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#E9F1EC" -help: - title: - en_US: Get your API Key from 01.ai - zh_Hans: 从零一万物获取 API Key - url: - en_US: https://platform.lingyiwanwu.com/apikeys -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: endpoint_url - label: - zh_Hans: 自定义 API endpoint 地址 - en_US: Custom API endpoint URL - type: text-input - required: false - placeholder: - zh_Hans: Base URL, e.g. https://api.lingyiwanwu.com/v1 - en_US: Base URL, e.g. 
https://api.lingyiwanwu.com/v1 diff --git a/api/core/model_runtime/model_providers/zhinao/__init__.py b/api/core/model_runtime/model_providers/zhinao/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg deleted file mode 100644 index b22b869441..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/_assets/icon_l_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg deleted file mode 100644 index 8fe72b7d09..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml deleted file mode 100644 index f420df0001..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo-responsibility-8k.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: 360gpt-turbo-responsibility-8k -label: - zh_Hans: 360gpt-turbo-responsibility-8k - en_US: 360gpt-turbo-responsibility-8k -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 8192 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 8192 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml deleted file mode 100644 index a2658fbe4f..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/llm/360gpt-turbo.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: 360gpt-turbo -label: - zh_Hans: 360gpt-turbo - en_US: 360gpt-turbo -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml b/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml deleted file mode 100644 index 00c81eb1da..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/llm/360gpt2-pro.yaml +++ /dev/null @@ -1,36 +0,0 @@ -model: 360gpt2-pro -label: - zh_Hans: 360gpt2-pro - en_US: 360gpt2-pro -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 2048 -parameter_rules: - - name: temperature - use_template: temperature - min: 0 - max: 1 - default: 0.5 - - name: top_p - use_template: top_p - min: 0 - max: 1 - default: 1 - - name: max_tokens - use_template: max_tokens - min: 1 - max: 2048 - default: 
1024 - - name: frequency_penalty - use_template: frequency_penalty - min: -2 - max: 2 - default: 0 - - name: presence_penalty - use_template: presence_penalty - min: -2 - max: 2 - default: 0 diff --git a/api/core/model_runtime/model_providers/zhinao/llm/__init__.py b/api/core/model_runtime/model_providers/zhinao/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml b/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml deleted file mode 100644 index ab8dbf5182..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/llm/_position.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- 360gpt2-pro -- 360gpt-turbo -- 360gpt-turbo-responsibility-8k diff --git a/api/core/model_runtime/model_providers/zhinao/llm/llm.py b/api/core/model_runtime/model_providers/zhinao/llm/llm.py deleted file mode 100644 index befc3de021..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/llm/llm.py +++ /dev/null @@ -1,31 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult -from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - - -class ZhinaoLargeLanguageModel(OAIAPICompatLargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - self._add_custom_parameters(credentials) - return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream) - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - @classmethod - def _add_custom_parameters(cls, credentials: dict) -> None: - credentials["mode"] = "chat" - credentials["endpoint_url"] = "https://api.360.cn/v1" diff --git a/api/core/model_runtime/model_providers/zhinao/zhinao.py b/api/core/model_runtime/model_providers/zhinao/zhinao.py deleted file mode 100644 index 2a263292f9..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/zhinao.py +++ /dev/null @@ -1,28 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class ZhinaoProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
- """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - # Use `360gpt-turbo` model for validate, - # no matter what model you pass in, text completion model or chat model - model_instance.validate_credentials(model="360gpt-turbo", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credentials validate failed") - raise ex diff --git a/api/core/model_runtime/model_providers/zhinao/zhinao.yaml b/api/core/model_runtime/model_providers/zhinao/zhinao.yaml deleted file mode 100644 index c5cb142c47..0000000000 --- a/api/core/model_runtime/model_providers/zhinao/zhinao.yaml +++ /dev/null @@ -1,32 +0,0 @@ -provider: zhinao -label: - en_US: 360 AI - zh_Hans: 360 智脑 -description: - en_US: Models provided by 360 AI. - zh_Hans: 360 智脑提供的模型。 -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#e3f0ff" -help: - title: - en_US: Get your API Key from 360 AI. - zh_Hans: 从360 智脑获取 API Key - url: - en_US: https://ai.360.com/platform/keys -supported_model_types: - - llm -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/zhipuai/__init__.py b/api/core/model_runtime/model_providers/zhipuai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_en.svg deleted file mode 100644 index d32499917d..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_en.svg +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_zh.svg deleted file mode 100644 index 067ea2c427..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_l_zh.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/zhipuai/_assets/icon_s_en.svg deleted file mode 100644 index 016f97ddab..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/_assets/icon_s_en.svg +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - diff --git a/api/core/model_runtime/model_providers/zhipuai/_common.py b/api/core/model_runtime/model_providers/zhipuai/_common.py deleted file mode 100644 index fa95232f71..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/_common.py +++ /dev/null @@ -1,41 +0,0 @@ -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) - - -class _CommonZhipuaiAI: - def _to_credential_kwargs(self, credentials: dict) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "api_key": credentials["api_key"] if "api_key" in credentials else credentials.get("zhipuai_api_key"), - } - - return credentials_kwargs - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the 
error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [], - InvokeRateLimitError: [], - InvokeAuthorizationError: [], - InvokeBadRequestError: [], - } diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/__init__.py b/api/core/model_runtime/model_providers/zhipuai/llm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite.yaml deleted file mode 100644 index 9778de1a2e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm_lite -label: - en_US: chatglm_lite -model_type: llm -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.9 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. -deprecated: true diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite_32k.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite_32k.yaml deleted file mode 100644 index 7836d964c6..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_lite_32k.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm_lite_32k -label: - en_US: chatglm_lite_32k -model_type: llm -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.9 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. 
It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. -deprecated: true diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_pro.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_pro.yaml deleted file mode 100644 index b3d53c812b..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_pro.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm_pro -label: - en_US: chatglm_pro -model_type: llm -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.9 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. 
-deprecated: true diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_std.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_std.yaml deleted file mode 100644 index 7d8b9520a0..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_std.yaml +++ /dev/null @@ -1,22 +0,0 @@ -model: chatglm_std -label: - en_US: chatglm_std -model_type: llm -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.9 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. -deprecated: true diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml deleted file mode 100644 index fcd5c5ef64..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/chatglm_turbo.yaml +++ /dev/null @@ -1,51 +0,0 @@ -model: chatglm_turbo -label: - en_US: chatglm_turbo -model_type: llm -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. 
The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: return_type - label: - zh_Hans: 回复类型 - en_US: Return Type - type: string - help: - zh_Hans: 用于控制每次返回内容的类型,空或者没有此字段时默认按照 json_string 返回,json_string 返回标准的 JSON 字符串,text 返回原始的文本内容。 - en_US: Used to control the type of content returned each time. When it is empty or does not have this field, it will be returned as json_string by default. json_string returns a standard JSON string, and text returns the original text content. - required: false - options: - - text - - json_string diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml deleted file mode 100644 index 7fcf692202..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-0520.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4-0520 -label: - en_US: glm-4-0520 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. 
The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
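Boolean rules such as web_search above are not special-cased anywhere: in the llm.py deleted at the end of this diff, every collected parameter is merged straight into the request body. A sketch under that assumption (the model name and the commented-out client call are illustrative):

```python
# All parameter_rules values, web_search included, reach the API through a
# plain dict merge -- this mirrors `params = {"model": model, "messages": [],
# **model_parameters}` in the llm.py removed below.
model_parameters = {
    "temperature": 0.95,   # defaults taken from the YAML above
    "top_p": 0.7,
    "web_search": True,    # let the model consult its built-in web search
}
params = {
    "model": "glm-4-0520",
    "messages": [{"role": "user", "content": "ping"}],
    **model_parameters,
}
# client.chat.completions.create(**params)  # bundled zhipuai_sdk client
```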
-pricing: - input: '0.1' - output: '0.1' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml deleted file mode 100644 index fcd7c7768c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-air.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4-air -label: - en_US: glm-4-air -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). 
Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.001' - output: '0.001' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml deleted file mode 100644 index c9ae5abf19..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-airx.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4-airx -label: - en_US: glm-4-airx -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. 
- default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.01' - output: '0.01' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml deleted file mode 100644 index 98c4f72c72..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm-4-flash.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4-flash -label: - en_US: glm-4-flash -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. 
- - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
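Per the do_sample help text repeated in each of these files, temperature and top_p take no effect when do_sample is false. A short sketch of that contract (normalize_sampling is an illustrative helper, not a Dify API):

```python
def normalize_sampling(model_parameters: dict) -> dict:
    """Drop sampling knobs that the upstream API would ignore anyway."""
    params = dict(model_parameters)
    if params.get("do_sample", True) is False:
        # Help text above: with do_sample false, temperature/top_p are inert.
        params.pop("temperature", None)
        params.pop("top_p", None)
    return params

assert normalize_sampling({"do_sample": False, "temperature": 0.9}) == {"do_sample": False}
```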
-pricing: - input: '0' - output: '0' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml deleted file mode 100644 index 0b5391ce2f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_3_turbo.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-3-turbo -label: - en_US: glm-3-turbo -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). 
Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 8192 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.001' - output: '0.001' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml deleted file mode 100644 index 62f453fb77..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4 -label: - en_US: glm-4 -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. 
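Besides parameter rules, each deleted file carries a pricing block (glm-3-turbo above: input '0.001', output '0.001', unit '0.001', currency RMB). Assuming the usual Dify reading that unit '0.001' scales the quoted price down to per-token, i.e. the price is quoted per 1,000 tokens, a cost estimate looks like:

```python
from decimal import Decimal

def estimate_cost(prompt_tokens: int, completion_tokens: int,
                  input_price: str, output_price: str,
                  unit: str = "0.001") -> Decimal:
    # cost = tokens * price * unit; Decimal avoids float drift on money
    u = Decimal(unit)
    return (Decimal(prompt_tokens) * Decimal(input_price) * u
            + Decimal(completion_tokens) * Decimal(output_price) * u)

# glm-3-turbo rates: 0.001 RMB per 1K tokens in each direction
print(estimate_cost(1200, 300, "0.001", "0.001"))  # 0.001500 RMB
```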
- default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.1' - output: '0.1' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml deleted file mode 100644 index 350b080c3f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_long.yaml +++ /dev/null @@ -1,65 +0,0 @@ -model: glm-4-long -label: - en_US: glm-4-long -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat - context_size: 10240 -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. 
- - name: top_p - use_template: top_p - default: 0.7 - min: 0.0 - max: 1.0 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
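The stream parameter documented above switches the API to chunked Event Stream output terminated by a data:[DONE] message. The llm.py deleted below consumes this through the bundled SDK rather than raw SSE; a condensed sketch of that loop, assuming chunk objects follow the SDK's ChatCompletionChunk shape:

```python
def collect_stream(responses) -> str:
    """Accumulate delta content from a stream=True completion call."""
    pieces = []
    for chunk in responses:   # yielded by chat.completions.create(stream=True, ...)
        if not chunk.choices:
            continue
        choice = chunk.choices[0]
        if choice.delta.content:
            pieces.append(choice.delta.content)
        if choice.finish_reason is not None:
            break             # the SDK stops at the data:[DONE] sentinel
    return "".join(pieces)
```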
-pricing: - input: '0.001' - output: '0.001' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml deleted file mode 100644 index 2d7ebd71cf..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4_plus.yaml +++ /dev/null @@ -1,62 +0,0 @@ -model: glm-4-plus -label: - en_US: glm-4-plus -model_type: llm -features: - - multi-tool-call - - agent-thought - - stream-tool-call -model_properties: - mode: chat -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.7 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). 
Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 4095 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.05' - output: '0.05' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml deleted file mode 100644 index 3a1120ff37..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v.yaml +++ /dev/null @@ -1,60 +0,0 @@ -model: glm-4v -label: - en_US: glm-4v -model_type: llm -model_properties: - mode: chat -features: - - vision -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.6 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. 
- default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. -pricing: - input: '0.05' - output: '0.05' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml b/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml deleted file mode 100644 index 14b9623e5a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/glm_4v_plus.yaml +++ /dev/null @@ -1,60 +0,0 @@ -model: glm-4v-plus -label: - en_US: glm-4v-plus -model_type: llm -model_properties: - mode: chat -features: - - vision -parameter_rules: - - name: temperature - use_template: temperature - default: 0.95 - min: 0.0 - max: 1.0 - help: - zh_Hans: 采样温度,控制输出的随机性,必须为正数取值范围是:(0.0,1.0],不能等于 0,默认值为 0.95 值越大,会使输出更随机,更具创造性;值越小,输出会更加稳定或确定建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Sampling temperature, controls the randomness of the output, must be a positive number. The value range is (0.0,1.0], which cannot be equal to 0. The default value is 0.95. The larger the value, the more random and creative the output will be; the smaller the value, The output will be more stable or certain. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: top_p - use_template: top_p - default: 0.6 - help: - zh_Hans: 用温度取样的另一种方法,称为核取样取值范围是:(0.0, 1.0) 开区间,不能等于 0 或 1,默认值为 0.7 模型考虑具有 top_p 概率质量tokens的结果例如:0.1 意味着模型解码器只考虑从前 10% 的概率的候选集中取 tokens 建议您根据应用场景调整 top_p 或 temperature 参数,但不要同时调整两个参数。 - en_US: Another method of temperature sampling is called kernel sampling. 
The value range is (0.0, 1.0) open interval, which cannot be equal to 0 or 1. The default value is 0.7. The model considers the results with top_p probability mass tokens. For example 0.1 means The model decoder only considers tokens from the candidate set with the top 10% probability. It is recommended that you adjust the top_p or temperature parameters according to the application scenario, but do not adjust both parameters at the same time. - - name: do_sample - label: - zh_Hans: 采样策略 - en_US: Sampling strategy - type: boolean - help: - zh_Hans: do_sample 为 true 时启用采样策略,do_sample 为 false 时采样策略 temperature、top_p 将不生效。默认值为 true。 - en_US: When `do_sample` is set to true, the sampling strategy is enabled. When `do_sample` is set to false, the sampling strategies such as `temperature` and `top_p` will not take effect. The default value is true. - default: true - - name: stream - label: - zh_Hans: 流处理 - en_US: Event Stream - type: boolean - help: - zh_Hans: 使用同步调用时,此参数应当设置为 fasle 或者省略。表示模型生成完所有内容后一次性返回所有内容。默认值为 false。如果设置为 true,模型将通过标准 Event Stream ,逐块返回模型生成内容。Event Stream 结束时会返回一条data:[DONE]消息。注意:在模型流式输出生成内容的过程中,我们会分批对模型生成内容进行检测,当检测到违法及不良信息时,API会返回错误码(1301)。开发者识别到错误码(1301),应及时采取(清屏、重启对话)等措施删除生成内容,并确保不将含有违法及不良信息的内容传递给模型继续生成,避免其造成负面影响。 - en_US: When using synchronous invocation, this parameter should be set to false or omitted. It indicates that the model will return all the generated content at once after the generation is complete. The default value is false. If set to true, the model will return the generated content in chunks via the standard Event Stream. A data:[DONE] message will be sent at the end of the Event Stream.Note:During the model's streaming output process, we will batch check the generated content. If illegal or harmful information is detected, the API will return an error code (1301). Developers who identify error code (1301) should promptly take actions such as clearing the screen or restarting the conversation to delete the generated content. They should also ensure that no illegal or harmful content is passed back to the model for continued generation to avoid negative impacts. - default: false - - name: max_tokens - use_template: max_tokens - default: 1024 - min: 1 - max: 1024 - - name: web_search - type: boolean - label: - zh_Hans: 联网搜索 - en_US: Web Search - default: false - help: - zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 - en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
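The vision entries (glm-4v, glm-4v-plus) are the one place the deleted llm.py reshapes message content: _construct_glm_4v_messages turns mixed text/image content into typed parts and _remove_image_header strips data-URL prefixes from images. A self-contained condensation of that logic (Part is a stand-in for Dify's PromptMessageContent):

```python
from dataclasses import dataclass

@dataclass
class Part:          # stand-in for PromptMessageContent
    type: str        # "text" or "image"
    data: str

def to_glm4v_content(content) -> list[dict]:
    # Condensed from _construct_glm_4v_messages / _remove_image_header below.
    if isinstance(content, str):
        return [{"type": "text", "text": content}]
    parts = []
    for item in content:
        if item.type == "image":
            data = item.data
            if data.startswith("data:image"):   # drop the data-URL header
                data = data.split(",", 1)[1]
            parts.append({"type": "image_url", "image_url": {"url": data}})
        else:
            parts.append({"type": "text", "text": item.data})
    return parts

print(to_glm4v_content([Part("text", "Describe this"),
                        Part("image", "data:image/png;base64,AAAA")]))
```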
-pricing: - input: '0.01' - output: '0.01' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py deleted file mode 100644 index ea331701ab..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py +++ /dev/null @@ -1,486 +0,0 @@ -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageContent, - PromptMessageContentType, - PromptMessageRole, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.zhipuai._common import _CommonZhipuaiAI -from core.model_runtime.model_providers.zhipuai.zhipuai_sdk._client import ZhipuAI -from core.model_runtime.model_providers.zhipuai.zhipuai_sdk.types.chat.chat_completion import Completion -from core.model_runtime.model_providers.zhipuai.zhipuai_sdk.types.chat.chat_completion_chunk import ChatCompletionChunk -from core.model_runtime.utils import helper - -GLM_JSON_MODE_PROMPT = """You should always follow the instructions and output a valid JSON object. -The structure of the JSON object you can found in the instructions, use {"answer": "$your_answer"} as the default structure -if you are not sure about the structure. - -And you should always end the block with a "```" to indicate the end of the JSON object. - - -{{instructions}} - - -```JSON""" # noqa: E501 - - -class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - # invoke model - # stop = stop or [] - # self._transform_json_prompts(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - return self._generate(model, credentials_kwargs, prompt_messages, model_parameters, tools, stop, stream, user) - - # def _transform_json_prompts(self, model: str, credentials: dict, - # prompt_messages: list[PromptMessage], model_parameters: dict, - # tools: list[PromptMessageTool] | None = None, stop: list[str] | None = None, - # stream: bool = True, user: str | None = None) \ - # -> None: - # """ - # Transform json prompts to model prompts - # """ - # if "}\n\n" not in stop: - # stop.append("}\n\n") - - # # check if there is a system message - # if len(prompt_messages) > 0 and isinstance(prompt_messages[0], SystemPromptMessage): - # # override the system message - # 
prompt_messages[0] = SystemPromptMessage( - # content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", prompt_messages[0].content) - # ) - # else: - # # insert the system message - # prompt_messages.insert(0, SystemPromptMessage( - # content=GLM_JSON_MODE_PROMPT.replace("{{instructions}}", "Please output a valid JSON object.") - # )) - # # check if the last message is a user message - # if len(prompt_messages) > 0 and isinstance(prompt_messages[-1], UserPromptMessage): - # # add ```JSON\n to the last message - # prompt_messages[-1].content += "\n```JSON\n" - # else: - # # append a user message - # prompt_messages.append(UserPromptMessage( - # content="```JSON\n" - # )) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - prompt = self._convert_messages_to_prompt(prompt_messages, tools) - - return self._get_num_tokens_by_gpt2(prompt) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - self._generate( - model=model, - credentials_kwargs=credentials_kwargs, - prompt_messages=[ - UserPromptMessage(content="ping"), - ], - model_parameters={ - "temperature": 0.5, - }, - tools=[], - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials_kwargs: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials_kwargs: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - extra_model_kwargs = {} - # request to glm-4v-plus with stop words will always response "finish_reason":"network_error" - if stop and model != "glm-4v-plus": - extra_model_kwargs["stop"] = stop - - client = ZhipuAI(api_key=credentials_kwargs["api_key"]) - - if len(prompt_messages) == 0: - raise ValueError("At least one message is required") - - if prompt_messages[0].role == PromptMessageRole.SYSTEM: - if not prompt_messages[0].content: - prompt_messages = prompt_messages[1:] - - # resolve zhipuai model not support system message and user message, assistant message must be in sequence - new_prompt_messages: list[PromptMessage] = [] - for prompt_message in prompt_messages: - copy_prompt_message = prompt_message.copy() - if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.SYSTEM, PromptMessageRole.TOOL}: - if isinstance(copy_prompt_message.content, list): - # check if model is 'glm-4v' - if model not in {"glm-4v", "glm-4v-plus"}: - # not support list message - continue - # get image and - if not isinstance(copy_prompt_message, 
UserPromptMessage): - # not support system message - continue - new_prompt_messages.append(copy_prompt_message) - - if not isinstance(copy_prompt_message.content, str): - # not support image message - continue - - if ( - new_prompt_messages - and new_prompt_messages[-1].role == PromptMessageRole.USER - and copy_prompt_message.role == PromptMessageRole.USER - ): - new_prompt_messages[-1].content += "\n\n" + copy_prompt_message.content - else: - if copy_prompt_message.role in {PromptMessageRole.USER, PromptMessageRole.TOOL}: - new_prompt_messages.append(copy_prompt_message) - elif copy_prompt_message.role == PromptMessageRole.SYSTEM: - new_prompt_message = SystemPromptMessage(content=copy_prompt_message.content) - new_prompt_messages.append(new_prompt_message) - else: - new_prompt_message = UserPromptMessage(content=copy_prompt_message.content) - new_prompt_messages.append(new_prompt_message) - else: - if new_prompt_messages and new_prompt_messages[-1].role == PromptMessageRole.ASSISTANT: - new_prompt_messages[-1].content += "\n\n" + copy_prompt_message.content - else: - new_prompt_messages.append(copy_prompt_message) - - if model in {"glm-4v", "glm-4v-plus"}: - params = self._construct_glm_4v_parameter(model, new_prompt_messages, model_parameters) - else: - params = {"model": model, "messages": [], **model_parameters} - # glm model - if not model.startswith("chatglm"): - for prompt_message in new_prompt_messages: - if prompt_message.role == PromptMessageRole.TOOL: - params["messages"].append( - { - "role": "tool", - "content": prompt_message.content, - "tool_call_id": prompt_message.tool_call_id, - } - ) - elif isinstance(prompt_message, AssistantPromptMessage): - if prompt_message.tool_calls: - params["messages"].append( - { - "role": "assistant", - "content": prompt_message.content, - "tool_calls": [ - { - "id": tool_call.id, - "type": tool_call.type, - "function": { - "name": tool_call.function.name, - "arguments": tool_call.function.arguments, - }, - } - for tool_call in prompt_message.tool_calls - ], - } - ) - else: - params["messages"].append({"role": "assistant", "content": prompt_message.content}) - else: - params["messages"].append( - {"role": prompt_message.role.value, "content": prompt_message.content} - ) - else: - # chatglm model - for prompt_message in new_prompt_messages: - # merge system message to user message - if prompt_message.role in { - PromptMessageRole.SYSTEM, - PromptMessageRole.TOOL, - PromptMessageRole.USER, - }: - if len(params["messages"]) > 0 and params["messages"][-1]["role"] == "user": - params["messages"][-1]["content"] += "\n\n" + prompt_message.content - else: - params["messages"].append({"role": "user", "content": prompt_message.content}) - else: - params["messages"].append( - {"role": prompt_message.role.value, "content": prompt_message.content} - ) - - if tools and len(tools) > 0: - params["tools"] = [{"type": "function", "function": helper.dump_model(tool)} for tool in tools] - - if stream: - response = client.chat.completions.create(stream=stream, **params, **extra_model_kwargs) - return self._handle_generate_stream_response(model, credentials_kwargs, tools, response, prompt_messages) - - response = client.chat.completions.create(**params, **extra_model_kwargs) - return self._handle_generate_response(model, credentials_kwargs, tools, response, prompt_messages) - - def _construct_glm_4v_parameter(self, model: str, prompt_messages: list[PromptMessage], model_parameters: dict): - messages = [ - {"role": message.role.value, "content": 
self._construct_glm_4v_messages(message.content)} - for message in prompt_messages - ] - - params = {"model": model, "messages": messages, **model_parameters} - - return params - - def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]: - if isinstance(prompt_message, str): - return [{"type": "text", "text": prompt_message}] - - return [ - {"type": "image_url", "image_url": {"url": self._remove_image_header(item.data)}} - if item.type == PromptMessageContentType.IMAGE - else {"type": "text", "text": item.data} - for item in prompt_message - ] - - def _remove_image_header(self, image: str) -> str: - if image.startswith("data:image"): - return image.split(",")[1] - - return image - - def _handle_generate_response( - self, - model: str, - credentials: dict, - tools: Optional[list[PromptMessageTool]], - response: Completion, - prompt_messages: list[PromptMessage], - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - text = "" - assistant_tool_calls: list[AssistantPromptMessage.ToolCall] = [] - for choice in response.choices: - if choice.message.tool_calls: - for tool_call in choice.message.tool_calls: - if tool_call.type == "function": - assistant_tool_calls.append( - AssistantPromptMessage.ToolCall( - id=tool_call.id, - type=tool_call.type, - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_call.function.name, - arguments=tool_call.function.arguments, - ), - ) - ) - - text += choice.message.content or "" - - prompt_usage = response.usage.prompt_tokens - completion_usage = response.usage.completion_tokens - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_usage, completion_usage) - - # transform response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage(content=text, tool_calls=assistant_tool_calls), - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, - model: str, - credentials: dict, - tools: Optional[list[PromptMessageTool]], - responses: Generator[ChatCompletionChunk, None, None], - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - full_assistant_content = "" - for chunk in responses: - if len(chunk.choices) == 0: - continue - - delta = chunk.choices[0] - - if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""): - continue - - assistant_tool_calls: list[AssistantPromptMessage.ToolCall] = [] - for tool_call in delta.delta.tool_calls or []: - if tool_call.type == "function": - assistant_tool_calls.append( - AssistantPromptMessage.ToolCall( - id=tool_call.id, - type=tool_call.type, - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_call.function.name, - arguments=tool_call.function.arguments, - ), - ) - ) - - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage( - content=delta.delta.content or "", tool_calls=assistant_tool_calls - ) - - full_assistant_content += delta.delta.content or "" - - if delta.finish_reason is not None and chunk.usage is not None: - completion_tokens = chunk.usage.completion_tokens - prompt_tokens = chunk.usage.prompt_tokens - - # transform usage - usage 
= self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint="", - delta=LLMResultChunkDelta( - index=delta.index, - message=assistant_prompt_message, - finish_reason=delta.finish_reason, - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=chunk.model, - prompt_messages=prompt_messages, - system_fingerprint="", - delta=LLMResultChunkDelta( - index=delta.index, message=assistant_prompt_message, finish_reason=delta.finish_reason - ), - ) - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. - """ - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - content = message.content - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage | ToolPromptMessage): - message_text = content - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _convert_messages_to_prompt( - self, messages: list[PromptMessage], tools: Optional[list[PromptMessageTool]] = None - ) -> str: - """ - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - if tools and len(tools) > 0: - text += "\n\nTools:" - for tool in tools: - text += f"\n{tool.json()}" - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/__init__.py b/api/core/model_runtime/model_providers/zhipuai/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml deleted file mode 100644 index f1b8b35602..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-2.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: embedding-2 -model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml b/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml deleted file mode 100644 index 5c55c911c4..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/embedding-3.yaml +++ /dev/null @@ -1,8 +0,0 @@ -model: embedding-3 -model_type: text-embedding -model_properties: - context_size: 8192 -pricing: - input: '0.0005' - unit: '0.001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.yaml b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.yaml deleted file mode 100644 index b9f5bc6397..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.yaml +++ /dev/null @@ -1,4 +0,0 @@ -model: text_embedding -model_type: text-embedding -model_properties: - context_size: 512
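A minimal, hypothetical sketch of the role-merging rule implemented in `_generate` above, reduced to plain dicts (the removed code operates on PromptMessage objects and has extra branches for multimodal and tool messages):

# Illustrative sketch only: merge consecutive same-role messages so the
# conversation alternates strictly, as the ZhipuAI API expects.
def merge_consecutive_roles(messages: list[dict]) -> list[dict]:
    merged: list[dict] = []
    for message in messages:
        if merged and merged[-1]["role"] == message["role"]:
            # same role as the previous message: concatenate the contents
            merged[-1]["content"] += "\n\n" + message["content"]
        else:
            merged.append(dict(message))
    return merged

assert merge_consecutive_roles(
    [
        {"role": "user", "content": "Hello"},
        {"role": "user", "content": "Are you there?"},
        {"role": "assistant", "content": "Yes."},
    ]
) == [
    {"role": "user", "content": "Hello\n\nAre you there?"},
    {"role": "assistant", "content": "Yes."},
]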
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai.py deleted file mode 100644 index e75aad6eb0..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.model_provider import ModelProvider - -logger = logging.getLogger(__name__) - - -class ZhipuaiProvider(ModelProvider): - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - - if validation fails, raise an exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. - """ - try: - model_instance = self.get_model_instance(ModelType.LLM) - - model_instance.validate_credentials(model="glm-4", credentials=credentials) - except CredentialsValidateFailedError as ex: - raise ex - except Exception as ex: - logger.exception(f"{self.get_provider_schema().provider} credential validation failed") - raise ex diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai.yaml b/api/core/model_runtime/model_providers/zhipuai/zhipuai.yaml deleted file mode 100644 index 303a549128..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai.yaml +++ /dev/null @@ -1,31 +0,0 @@ -provider: zhipuai -label: - zh_Hans: 智谱 AI - en_US: ZHIPU AI -icon_small: - en_US: icon_s_en.svg -icon_large: - zh_Hans: icon_l_zh.svg - en_US: icon_l_en.svg -background: "#EFF1FE" -help: - title: - en_US: Get your API key from ZHIPU AI - zh_Hans: 从智谱 AI 获取 API Key - url: - en_US: https://open.bigmodel.cn/usercenter/apikeys -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: APIKey - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 APIKey - en_US: Enter your APIKey diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py deleted file mode 100644 index fc71d64714..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .__version__ import __version__ -from ._client import ZhipuAI -from .core import ( - APIAuthenticationError, - APIConnectionError, - APIInternalError, - APIReachLimitError, - APIRequestFailedError, - APIResponseError, - APIResponseValidationError, - APIServerFlowExceedError, - APIStatusError, - APITimeoutError, - ZhipuAIError, -) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py deleted file mode 100644 index 51f8c49ecb..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/__version__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "v2.1.0" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py deleted file mode 100644 index 705d371e62..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/_client.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import annotations - -import os -from collections.abc import Mapping -from typing import Union - -import httpx
-from httpx import Timeout -from typing_extensions import override - -from . import api_resource -from .core import NOT_GIVEN, ZHIPUAI_DEFAULT_MAX_RETRIES, HttpClient, NotGiven, ZhipuAIError, _jwt_token - - -class ZhipuAI(HttpClient): - chat: api_resource.chat.Chat - api_key: str - _disable_token_cache: bool = True - - def __init__( - self, - *, - api_key: str | None = None, - base_url: str | httpx.URL | None = None, - timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, - max_retries: int = ZHIPUAI_DEFAULT_MAX_RETRIES, - http_client: httpx.Client | None = None, - custom_headers: Mapping[str, str] | None = None, - disable_token_cache: bool = True, - _strict_response_validation: bool = False, - ) -> None: - if api_key is None: - api_key = os.environ.get("ZHIPUAI_API_KEY") - if api_key is None: - raise ZhipuAIError("api_key not provided; pass it via the api_key argument or the ZHIPUAI_API_KEY environment variable") - self.api_key = api_key - self._disable_token_cache = disable_token_cache - - if base_url is None: - base_url = os.environ.get("ZHIPUAI_BASE_URL") - if base_url is None: - base_url = "https://open.bigmodel.cn/api/paas/v4" - from .__version__ import __version__ - - super().__init__( - version=__version__, - base_url=base_url, - max_retries=max_retries, - timeout=timeout, - custom_httpx_client=http_client, - custom_headers=custom_headers, - _strict_response_validation=_strict_response_validation, - ) - self.chat = api_resource.chat.Chat(self) - self.images = api_resource.images.Images(self) - self.embeddings = api_resource.embeddings.Embeddings(self) - self.files = api_resource.files.Files(self) - self.fine_tuning = api_resource.fine_tuning.FineTuning(self) - self.batches = api_resource.Batches(self) - self.knowledge = api_resource.Knowledge(self) - self.tools = api_resource.Tools(self) - self.videos = api_resource.Videos(self) - self.assistant = api_resource.Assistant(self) - - @property - @override - def auth_headers(self) -> dict[str, str]: - api_key = self.api_key - if self._disable_token_cache: - return {"Authorization": f"Bearer {api_key}"} - else: - return {"Authorization": f"Bearer {_jwt_token.generate_token(api_key)}"} - - def __del__(self) -> None: - if not hasattr(self, "_has_custom_http_client") or not hasattr(self, "close") or not hasattr(self, "_client"): - # if the '__init__' method raised an error, self would not have client attr - return - - if self._has_custom_http_client: - return - - self.close() diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py deleted file mode 100644 index 4fe0719dde..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .assistant import ( - Assistant, -) -from .batches import Batches -from .chat import ( - AsyncCompletions, - Chat, - Completions, -) -from .embeddings import Embeddings -from .files import Files, FilesWithRawResponse -from .fine_tuning import FineTuning -from .images import Images -from .knowledge import Knowledge -from .tools import Tools -from .videos import ( - Videos, -) - -__all__ = [ - "Videos", - "AsyncCompletions", - "Chat", - "Completions", - "Images", - "Embeddings", - "Files", - "FilesWithRawResponse", - "FineTuning", - "Batches", - "Knowledge", - "Tools", - "Assistant", -]
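A minimal usage sketch of the client defined above, assuming the package is importable and ZHIPUAI_API_KEY is set; the model name glm-4 is the one validate_provider_credentials pings:

# Illustrative sketch only; import path and environment variable as defined above.
import os

from core.model_runtime.model_providers.zhipuai.zhipuai_sdk import ZhipuAI

client = ZhipuAI(api_key=os.environ["ZHIPUAI_API_KEY"])  # the constructor also falls back to this env var
response = client.chat.completions.create(
    model="glm-4",
    messages=[{"role": "user", "content": "ping"}],
    stream=False,
)
print(response)

With the default disable_token_cache=True, auth_headers sends the raw API key as a Bearer token; otherwise a JWT generated by _jwt_token is used instead.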
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py deleted file mode 100644 index ce619aa7f0..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .assistant import Assistant - -__all__ = ["Assistant"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py deleted file mode 100644 index f772340a82..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/assistant/assistant.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - StreamResponse, - deepcopy_minimal, - make_request_options, - maybe_transform, -) -from ...types.assistant import AssistantCompletion -from ...types.assistant.assistant_conversation_resp import ConversationUsageListResp -from ...types.assistant.assistant_support_resp import AssistantSupportResp - -if TYPE_CHECKING: - from ..._client import ZhipuAI - -from ...types.assistant import assistant_conversation_params, assistant_create_params - -__all__ = ["Assistant"] - - -class Assistant(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def conversation( - self, - assistant_id: str, - model: str, - messages: list[assistant_create_params.ConversationMessage], - *, - stream: bool = True, - conversation_id: Optional[str] = None, - attachments: Optional[list[assistant_create_params.AssistantAttachments]] = None, - metadata: dict | None = None, - request_id: str = None, - user_id: str = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> StreamResponse[AssistantCompletion]: - body = deepcopy_minimal( - { - "assistant_id": assistant_id, - "model": model, - "messages": messages, - "stream": stream, - "conversation_id": conversation_id, - "attachments": attachments, - "metadata": metadata, - "request_id": request_id, - "user_id": user_id, - } - ) - return self._post( - "/assistant", - body=maybe_transform(body, assistant_create_params.AssistantParameters), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=AssistantCompletion, - stream=stream or True, - stream_cls=StreamResponse[AssistantCompletion], - ) - - def query_support( - self, - *, - assistant_id_list: list[str] = None, - request_id: str = None, - user_id: str = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AssistantSupportResp: - body = deepcopy_minimal( - { - "assistant_id_list": assistant_id_list, - "request_id": request_id, - "user_id": user_id, - } - ) - return self._post( - "/assistant/list", - body=body, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=AssistantSupportResp, - ) - - def query_conversation_usage( - self, - assistant_id: str, - page: int = 1, - page_size: int = 10, - *, - request_id: str = None, - user_id: str = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) ->
ConversationUsageListResp: - body = deepcopy_minimal( - { - "assistant_id": assistant_id, - "page": page, - "page_size": page_size, - "request_id": request_id, - "user_id": user_id, - } - ) - return self._post( - "/assistant/conversation/list", - body=maybe_transform(body, assistant_conversation_params.ConversationParameters), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=ConversationUsageListResp, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py deleted file mode 100644 index ae2f2be85e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/batches.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Literal, Optional - -import httpx - -from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options, maybe_transform -from ..core.pagination import SyncCursorPage -from ..types import batch_create_params, batch_list_params -from ..types.batch import Batch - -if TYPE_CHECKING: - from .._client import ZhipuAI - - -class Batches(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - completion_window: str | None = None, - endpoint: Literal["/v1/chat/completions", "/v1/embeddings"], - input_file_id: str, - metadata: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, - auto_delete_input_file: bool = True, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - return self._post( - "/batches", - body=maybe_transform( - { - "completion_window": completion_window, - "endpoint": endpoint, - "input_file_id": input_file_id, - "metadata": metadata, - "auto_delete_input_file": auto_delete_input_file, - }, - batch_create_params.BatchCreateParams, - ), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=Batch, - ) - - def retrieve( - self, - batch_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Retrieves a batch. - - Args: - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._get( - f"/batches/{batch_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=Batch, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SyncCursorPage[Batch]: - """List your organization's batches. - - Args: - after: A cursor for use in pagination. - - `after` is an object ID that defines your place - in the list. For instance, if you make a list request and receive 100 objects, - ending with obj_foo, your subsequent call can include after=obj_foo in order to - fetch the next page of the list. - - limit: A limit on the number of objects to be returned. 
Limit can range between 1 and - 100, and the default is 20. - - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get_api_list( - "/batches", - page=SyncCursorPage[Batch], - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "after": after, - "limit": limit, - }, - batch_list_params.BatchListParams, - ), - ), - model=Batch, - ) - - def cancel( - self, - batch_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Batch: - """ - Cancels an in-progress batch. - - Args: - batch_id: The ID of the batch to cancel. - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - - """ - if not batch_id: - raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}") - return self._post( - f"/batches/{batch_id}/cancel", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=Batch, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py deleted file mode 100644 index 5cd8dc6f33..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .async_completions import AsyncCompletions -from .chat import Chat -from .completions import Completions - -__all__ = ["AsyncCompletions", "Chat", "Completions"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py deleted file mode 100644 index 05510a3ec4..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/async_completions.py +++ /dev/null @@ -1,115 +0,0 @@ -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING, Literal, Optional, Union - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - drop_prefix_image_data, - make_request_options, - maybe_transform, -) -from ...types.chat.async_chat_completion import AsyncCompletion, AsyncTaskStatus -from ...types.chat.code_geex import code_geex_params -from ...types.sensitive_word_check import SensitiveWordCheckRequest - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from ..._client import ZhipuAI - - -class AsyncCompletions(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - model: str, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - user_id: Optional[str] | NotGiven = NOT_GIVEN, - do_sample: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - seed: int | NotGiven = NOT_GIVEN, - messages: Union[str, list[str], list[int], list[list[int]], None], - stop: Optional[Union[str, list[str], None]] | NotGiven = NOT_GIVEN, - sensitive_word_check: 
Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, - tools: Optional[object] | NotGiven = NOT_GIVEN, - tool_choice: str | NotGiven = NOT_GIVEN, - meta: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, - extra: Optional[code_geex_params.CodeGeexExtra] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AsyncTaskStatus: - _cast_type = AsyncTaskStatus - logger.debug(f"temperature:{temperature}, top_p:{top_p}") - if temperature is not None and temperature != NOT_GIVEN: - if temperature <= 0: - do_sample = False - temperature = 0.01 - # logger.warning("temperature: valid range is the open interval (0.0, 1.0); do_sample is rewritten to false (top_p and temperature take no effect)") # noqa: E501 - if temperature >= 1: - temperature = 0.99 - # logger.warning("temperature: valid range is the open interval (0.0, 1.0)") - if top_p is not None and top_p != NOT_GIVEN: - if top_p >= 1: - top_p = 0.99 - # logger.warning("top_p: valid range is the open interval (0.0, 1.0); it cannot equal 0 or 1") - if top_p <= 0: - top_p = 0.01 - # logger.warning("top_p: valid range is the open interval (0.0, 1.0); it cannot equal 0 or 1") - - logger.debug(f"temperature:{temperature}, top_p:{top_p}") - if isinstance(messages, list): - for item in messages: - if item.get("content"): - item["content"] = drop_prefix_image_data(item["content"]) - - body = { - "model": model, - "request_id": request_id, - "user_id": user_id, - "temperature": temperature, - "top_p": top_p, - "do_sample": do_sample, - "max_tokens": max_tokens, - "seed": seed, - "messages": messages, - "stop": stop, - "sensitive_word_check": sensitive_word_check, - "tools": tools, - "tool_choice": tool_choice, - "meta": meta, - "extra": maybe_transform(extra, code_geex_params.CodeGeexExtra), - } - return self._post( - "/async/chat/completions", - body=body, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=_cast_type, - stream=False, - ) - - def retrieve_completion_result( - self, - id: str, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Union[AsyncCompletion, AsyncTaskStatus]: - _cast_type = Union[AsyncCompletion, AsyncTaskStatus] - return self._get( - path=f"/async-result/{id}", - cast_type=_cast_type, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py deleted file mode 100644 index b3cc46566c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/chat.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import TYPE_CHECKING - -from ...core import BaseAPI, cached_property -from .async_completions import AsyncCompletions -from .completions import Completions - -if TYPE_CHECKING: - pass - - -class Chat(BaseAPI): - @cached_property - def completions(self) -> Completions: - return Completions(self._client) - - @cached_property - def asyncCompletions(self) -> AsyncCompletions: # noqa: N802 - return AsyncCompletions(self._client)
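The clamping in `create` above keeps temperature and top_p inside the open interval (0.0, 1.0) that ZhipuAI accepts; a standalone sketch of the same rule, using elif in place of the sequential ifs (equivalent here, since the nudged value 0.01 can never reach the upper branch):

# Illustrative sketch of the sampling-parameter clamping above: boundary values
# are nudged inward, and temperature <= 0 additionally disables sampling.
def clamp_sampling(temperature, top_p, do_sample):
    if temperature is not None:
        if temperature <= 0:
            do_sample = False  # greedy decoding; temperature/top_p no longer apply
            temperature = 0.01
        elif temperature >= 1:
            temperature = 0.99
    if top_p is not None:
        if top_p >= 1:
            top_p = 0.99
        elif top_p <= 0:
            top_p = 0.01
    return temperature, top_p, do_sample

assert clamp_sampling(0.0, None, None) == (0.01, None, False)
assert clamp_sampling(1.5, 1.0, True) == (0.99, 0.99, True)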
diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py deleted file mode 100644 index 8e5bb454e6..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/chat/completions.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING, Literal, Optional, Union - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - StreamResponse, - deepcopy_minimal, - drop_prefix_image_data, - make_request_options, - maybe_transform, -) -from ...types.chat.chat_completion import Completion -from ...types.chat.chat_completion_chunk import ChatCompletionChunk -from ...types.chat.code_geex import code_geex_params -from ...types.sensitive_word_check import SensitiveWordCheckRequest - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from ..._client import ZhipuAI - - -class Completions(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - model: str, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - user_id: Optional[str] | NotGiven = NOT_GIVEN, - do_sample: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - seed: int | NotGiven = NOT_GIVEN, - messages: Union[str, list[str], list[int], object, None], - stop: Optional[Union[str, list[str], None]] | NotGiven = NOT_GIVEN, - sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, - tools: Optional[object] | NotGiven = NOT_GIVEN, - tool_choice: str | NotGiven = NOT_GIVEN, - meta: Optional[dict[str, str]] | NotGiven = NOT_GIVEN, - extra: Optional[code_geex_params.CodeGeexExtra] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Completion | StreamResponse[ChatCompletionChunk]: - logger.debug(f"temperature:{temperature}, top_p:{top_p}") - if temperature is not None and temperature != NOT_GIVEN: - if temperature <= 0: - do_sample = False - temperature = 0.01 - # logger.warning("temperature: valid range is the open interval (0.0, 1.0); do_sample is rewritten to false (top_p and temperature take no effect)") # noqa: E501 - if temperature >= 1: - temperature = 0.99 - # logger.warning("temperature: valid range is the open interval (0.0, 1.0)") - if top_p is not None and top_p != NOT_GIVEN: - if top_p >= 1: - top_p = 0.99 - # logger.warning("top_p: valid range is the open interval (0.0, 1.0); it cannot equal 0 or 1") - if top_p <= 0: - top_p = 0.01 - # logger.warning("top_p: valid range is the open interval (0.0, 1.0); it cannot equal 0 or 1") - - logger.debug(f"temperature:{temperature}, top_p:{top_p}") - if isinstance(messages, list): - for item in messages: - if item.get("content"): - item["content"] = drop_prefix_image_data(item["content"]) - - body = deepcopy_minimal( - { - "model": model, - "request_id": request_id, - "user_id": user_id, - "temperature": temperature, - "top_p": top_p, - "do_sample": do_sample, - "max_tokens": max_tokens, - "seed": seed, - "messages": messages, - "stop": stop, - "sensitive_word_check": sensitive_word_check, - "stream": stream, - "tools": tools, - "tool_choice": tool_choice, - "meta": meta, - "extra": maybe_transform(extra, code_geex_params.CodeGeexExtra), - } - ) - return self._post( - "/chat/completions", - body=body, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=Completion, - stream=stream or False, -
stream_cls=StreamResponse[ChatCompletionChunk], - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py deleted file mode 100644 index 4b4baef942..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/embeddings.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional, Union - -import httpx - -from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options -from ..types.embeddings import EmbeddingsResponded - -if TYPE_CHECKING: - from .._client import ZhipuAI - - -class Embeddings(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - input: Union[str, list[str], list[int], list[list[int]]], - model: Union[str], - dimensions: Union[int] | NotGiven = NOT_GIVEN, - encoding_format: str | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - sensitive_word_check: Optional[object] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - disable_strict_validation: Optional[bool] | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EmbeddingsResponded: - _cast_type = EmbeddingsResponded - if disable_strict_validation: - _cast_type = object - return self._post( - "/embeddings", - body={ - "input": input, - "model": model, - "dimensions": dimensions, - "encoding_format": encoding_format, - "user": user, - "request_id": request_id, - "sensitive_word_check": sensitive_word_check, - }, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=_cast_type, - stream=False, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py deleted file mode 100644 index ba9de75b7e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/files.py +++ /dev/null @@ -1,194 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping -from typing import TYPE_CHECKING, Literal, cast - -import httpx - -from ..core import ( - NOT_GIVEN, - BaseAPI, - Body, - FileTypes, - Headers, - NotGiven, - _legacy_binary_response, - _legacy_response, - deepcopy_minimal, - extract_files, - make_request_options, - maybe_transform, -) -from ..types.files import FileDeleted, FileObject, ListOfFileObject, UploadDetail, file_create_params - -if TYPE_CHECKING: - from .._client import ZhipuAI - -__all__ = ["Files", "FilesWithRawResponse"] - - -class Files(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - file: FileTypes = None, - upload_detail: list[UploadDetail] = None, - purpose: Literal["fine-tune", "retrieval", "batch"], - knowledge_id: str = None, - sentence_size: int = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileObject: - if not file and not upload_detail: - raise ValueError("At least one of `file` and `upload_detail` must be provided.") - body = deepcopy_minimal( - { - "file": file, - "upload_detail": upload_detail, - "purpose": purpose, - "knowledge_id": knowledge_id, - "sentence_size": 
sentence_size, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/files", - body=maybe_transform(body, file_create_params.FileCreateParams), - files=files, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FileObject, - ) - - # def retrieve( - # self, - # file_id: str, - # *, - # extra_headers: Headers | None = None, - # extra_body: Body | None = None, - # timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - # ) -> FileObject: - # """ - # Returns information about a specific file. - # - # Args: - # file_id: The ID of the file to retrieve information about - # extra_headers: Send extra headers - # - # extra_body: Add additional JSON properties to the request - # - # timeout: Override the client-level default timeout for this request, in seconds - # """ - # if not file_id: - # raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - # return self._get( - # f"/files/{file_id}", - # options=make_request_options( - # extra_headers=extra_headers, extra_body=extra_body, timeout=timeout - # ), - # cast_type=FileObject, - # ) - - def list( - self, - *, - purpose: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - after: str | NotGiven = NOT_GIVEN, - order: str | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListOfFileObject: - return self._get( - "/files", - cast_type=ListOfFileObject, - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query={ - "purpose": purpose, - "limit": limit, - "after": after, - "order": order, - }, - ), - ) - - def delete( - self, - file_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FileDeleted: - """ - Delete a file. - - Args: - file_id: The ID of the file to delete - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - return self._delete( - f"/files/{file_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FileDeleted, - ) - - def content( - self, - file_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> _legacy_response.HttpxBinaryResponseContent: - """ - Returns the contents of the specified file. 
- - Args: - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not file_id: - raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") - extra_headers = {"Accept": "application/binary", **(extra_headers or {})} - return self._get( - f"/files/{file_id}/content", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=_legacy_binary_response.HttpxBinaryResponseContent, - ) - - -class FilesWithRawResponse: - def __init__(self, files: Files) -> None: - self._files = files - - self.create = _legacy_response.to_raw_response_wrapper( - files.create, - ) - self.list = _legacy_response.to_raw_response_wrapper( - files.list, - ) - self.content = _legacy_response.to_raw_response_wrapper( - files.content, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py deleted file mode 100644 index 7c309b8341..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .fine_tuning import FineTuning -from .jobs import Jobs -from .models import FineTunedModels - -__all__ = ["Jobs", "FineTunedModels", "FineTuning"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py deleted file mode 100644 index 8670f7de00..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/fine_tuning.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import TYPE_CHECKING - -from ...core import BaseAPI, cached_property -from .jobs import Jobs -from .models import FineTunedModels - -if TYPE_CHECKING: - pass - - -class FineTuning(BaseAPI): - @cached_property - def jobs(self) -> Jobs: - return Jobs(self._client) - - @cached_property - def models(self) -> FineTunedModels: - return FineTunedModels(self._client) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py deleted file mode 100644 index 40777a153f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .jobs import Jobs - -__all__ = ["Jobs"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py deleted file mode 100644 index 8b038cadc0..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/jobs/jobs.py +++ /dev/null @@ -1,152 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional - -import httpx - -from ....core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - make_request_options, -) -from ....types.fine_tuning import ( - FineTuningJob, - FineTuningJobEvent, - ListOfFineTuningJob, - job_create_params, -) - -if TYPE_CHECKING: - from ...._client import ZhipuAI - -__all__ = ["Jobs"] - - -class Jobs(BaseAPI): - def 
__init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - model: str, - training_file: str, - hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, - suffix: Optional[str] | NotGiven = NOT_GIVEN, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - validation_file: Optional[str] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - return self._post( - "/fine_tuning/jobs", - body={ - "model": model, - "training_file": training_file, - "hyperparameters": hyperparameters, - "suffix": suffix, - "validation_file": validation_file, - "request_id": request_id, - }, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FineTuningJob, - ) - - def retrieve( - self, - fine_tuning_job_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - return self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FineTuningJob, - ) - - def list( - self, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ListOfFineTuningJob: - return self._get( - "/fine_tuning/jobs", - cast_type=ListOfFineTuningJob, - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query={ - "after": after, - "limit": limit, - }, - ), - ) - - def cancel( - self, - fine_tuning_job_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # noqa: E501 - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._post( - f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FineTuningJob, - ) - - def list_events( - self, - fine_tuning_job_id: str, - *, - after: str | NotGiven = NOT_GIVEN, - limit: int | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJobEvent: - return self._get( - f"/fine_tuning/jobs/{fine_tuning_job_id}/events", - cast_type=FineTuningJobEvent, - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query={ - "after": after, - "limit": limit, - }, - ), - ) - - def delete( - self, - fine_tuning_job_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTuningJob: - if not fine_tuning_job_id: - raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") - return self._delete( - f"/fine_tuning/jobs/{fine_tuning_job_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=FineTuningJob, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py deleted file mode 100644 index d832635baf..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .fine_tuned_models import FineTunedModels - -__all__ = ["FineTunedModels"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py deleted file mode 100644 index 29c023e3b1..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/fine_tuning/models/fine_tuned_models.py +++ /dev/null @@ -1,41 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -import httpx - -from ....core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - make_request_options, -) -from ....types.fine_tuning.models import FineTunedModelsStatus - -if TYPE_CHECKING: - from ...._client import ZhipuAI - -__all__ = ["FineTunedModels"] - - -class FineTunedModels(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def delete( - self, - fine_tuned_model: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FineTunedModelsStatus: - if not fine_tuned_model: - raise ValueError(f"Expected a non-empty value for `fine_tuned_model` but received {fine_tuned_model!r}") - return self._delete( - f"fine_tuning/fine_tuned_models/{fine_tuned_model}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, 
timeout=timeout), - cast_type=FineTunedModelsStatus, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py deleted file mode 100644 index 8ad411913f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/images.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional - -import httpx - -from ..core import NOT_GIVEN, BaseAPI, Body, Headers, NotGiven, make_request_options -from ..types.image import ImagesResponded -from ..types.sensitive_word_check import SensitiveWordCheckRequest - -if TYPE_CHECKING: - from .._client import ZhipuAI - - -class Images(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def generations( - self, - *, - prompt: str, - model: str | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - quality: Optional[str] | NotGiven = NOT_GIVEN, - response_format: Optional[str] | NotGiven = NOT_GIVEN, - size: Optional[str] | NotGiven = NOT_GIVEN, - style: Optional[str] | NotGiven = NOT_GIVEN, - sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - user_id: Optional[str] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - disable_strict_validation: Optional[bool] | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponded: - _cast_type = ImagesResponded - if disable_strict_validation: - _cast_type = object - return self._post( - "/images/generations", - body={ - "prompt": prompt, - "model": model, - "n": n, - "quality": quality, - "response_format": response_format, - "sensitive_word_check": sensitive_word_check, - "size": size, - "style": style, - "user": user, - "user_id": user_id, - "request_id": request_id, - }, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=_cast_type, - stream=False, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py deleted file mode 100644 index 5a67d743c3..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .knowledge import Knowledge - -__all__ = ["Knowledge"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py deleted file mode 100644 index fd289e2232..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .document import Document - -__all__ = ["Document"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py deleted file mode 100644 index 2c4066d893..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/document/document.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import annotations - 
-from collections.abc import Mapping -from typing import TYPE_CHECKING, Literal, Optional, cast - -import httpx - -from ....core import ( - NOT_GIVEN, - BaseAPI, - Body, - FileTypes, - Headers, - NotGiven, - deepcopy_minimal, - extract_files, - make_request_options, - maybe_transform, -) -from ....types.files import UploadDetail, file_create_params -from ....types.knowledge.document import DocumentData, DocumentObject, document_edit_params, document_list_params -from ....types.knowledge.document.document_list_resp import DocumentPage - -if TYPE_CHECKING: - from ...._client import ZhipuAI - -__all__ = ["Document"] - - -class Document(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def create( - self, - *, - file: FileTypes = None, - custom_separator: Optional[list[str]] = None, - upload_detail: list[UploadDetail] = None, - purpose: Literal["retrieval"], - knowledge_id: str = None, - sentence_size: int = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DocumentObject: - if not file and not upload_detail: - raise ValueError("At least one of `file` and `upload_detail` must be provided.") - body = deepcopy_minimal( - { - "file": file, - "upload_detail": upload_detail, - "purpose": purpose, - "custom_separator": custom_separator, - "knowledge_id": knowledge_id, - "sentence_size": sentence_size, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) - if files: - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return self._post( - "/files", - body=maybe_transform(body, file_create_params.FileCreateParams), - files=files, - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=DocumentObject, - ) - - def edit( - self, - document_id: str, - knowledge_type: str, - *, - custom_separator: Optional[list[str]] = None, - sentence_size: Optional[int] = None, - callback_url: Optional[str] = None, - callback_header: Optional[dict[str, str]] = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> httpx.Response: - """ - - Args: - document_id: knowledge document ID - knowledge_type: knowledge type: - 1: article knowledge, supports pdf, url, docx - 2. Q&A knowledge - document, supports pdf, url, docx - 3. Q&A knowledge - table, supports xlsx - 4. product library - table, supports xlsx - 5. custom, supports pdf, url, docx - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - :param knowledge_type: - :param document_id: - :param timeout: - :param extra_body: - :param callback_header: - :param sentence_size: - :param extra_headers: - :param callback_url: - :param custom_separator: - """ - if not document_id: - raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") - - body = deepcopy_minimal( - { - "id": document_id, - "knowledge_type": knowledge_type, - "custom_separator": custom_separator, - "sentence_size": sentence_size, - "callback_url": callback_url, - "callback_header": callback_header, - } - ) - - return self._put( - f"/document/{document_id}", - body=maybe_transform(body, document_edit_params.DocumentEditParams), -
options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=httpx.Response, - ) - - def list( - self, - knowledge_id: str, - *, - purpose: str | NotGiven = NOT_GIVEN, - page: str | NotGiven = NOT_GIVEN, - limit: str | NotGiven = NOT_GIVEN, - order: Literal["desc", "asc"] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DocumentPage: - return self._get( - "/files", - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "knowledge_id": knowledge_id, - "purpose": purpose, - "page": page, - "limit": limit, - "order": order, - }, - document_list_params.DocumentListParams, - ), - ), - cast_type=DocumentPage, - ) - - def delete( - self, - document_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> httpx.Response: - """ - Delete a document. - - Args: - - document_id: knowledge document ID - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not document_id: - raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") - - return self._delete( - f"/document/{document_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=httpx.Response, - ) - - def retrieve( - self, - document_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DocumentData: - """ - - Args: - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not document_id: - raise ValueError(f"Expected a non-empty value for `document_id` but received {document_id!r}") - - return self._get( - f"/document/{document_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=DocumentData, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py deleted file mode 100644 index fea4c73ac9..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/knowledge/knowledge.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Literal, Optional - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - cached_property, - deepcopy_minimal, - make_request_options, - maybe_transform, -) -from ...types.knowledge import KnowledgeInfo, KnowledgeUsed, knowledge_create_params, knowledge_list_params -from ...types.knowledge.knowledge_list_resp import KnowledgePage -from .document import Document - -if TYPE_CHECKING: - from ..._client import ZhipuAI - -__all__ = ["Knowledge"] - - -class Knowledge(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - @cached_property - def document(self) -> Document: - return Document(self._client) - - def create( - self, - embedding_id: int, - name: str, - *, -
customer_identifier: Optional[str] = None, - description: Optional[str] = None, - background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None, - icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None, - bucket_id: Optional[str] = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeInfo: - body = deepcopy_minimal( - { - "embedding_id": embedding_id, - "name": name, - "customer_identifier": customer_identifier, - "description": description, - "background": background, - "icon": icon, - "bucket_id": bucket_id, - } - ) - return self._post( - "/knowledge", - body=maybe_transform(body, knowledge_create_params.KnowledgeBaseParams), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=KnowledgeInfo, - ) - - def modify( - self, - knowledge_id: str, - embedding_id: int, - *, - name: str, - description: Optional[str] = None, - background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None, - icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> httpx.Response: - body = deepcopy_minimal( - { - "id": knowledge_id, - "embedding_id": embedding_id, - "name": name, - "description": description, - "background": background, - "icon": icon, - } - ) - return self._put( - f"/knowledge/{knowledge_id}", - body=maybe_transform(body, knowledge_create_params.KnowledgeBaseParams), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=httpx.Response, - ) - - def query( - self, - *, - page: int | NotGiven = 1, - size: int | NotGiven = 10, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgePage: - return self._get( - "/knowledge", - options=make_request_options( - extra_headers=extra_headers, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "size": size, - }, - knowledge_list_params.KnowledgeListParams, - ), - ), - cast_type=KnowledgePage, - ) - - def delete( - self, - knowledge_id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> httpx.Response: - """ - Delete a knowledge base. - - Args: - knowledge_id: knowledge base ID - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_id: - raise ValueError("Expected a non-empty value for `knowledge_id`") - - return self._delete( - f"/knowledge/{knowledge_id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=httpx.Response, - ) - - def used( - self, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeUsed: - """ - Returns the used capacity of the knowledge base.
- - Args: - extra_headers: Send extra headers - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/knowledge/capacity", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=KnowledgeUsed, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py deleted file mode 100644 index 43e4e37da1..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .tools import Tools - -__all__ = ["Tools"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py deleted file mode 100644 index 3c3a630aff..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/tools/tools.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING, Literal, Optional, Union - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - StreamResponse, - deepcopy_minimal, - make_request_options, - maybe_transform, -) -from ...types.tools import WebSearch, WebSearchChunk, tools_web_search_params - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from ..._client import ZhipuAI - -__all__ = ["Tools"] - - -class Tools(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def web_search( - self, - *, - model: str, - request_id: Optional[str] | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - messages: Union[str, list[str], list[int], object, None], - scope: Optional[str] | NotGiven = NOT_GIVEN, - location: Optional[str] | NotGiven = NOT_GIVEN, - recent_days: Optional[int] | NotGiven = NOT_GIVEN, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> WebSearch | StreamResponse[WebSearchChunk]: - body = deepcopy_minimal( - { - "model": model, - "request_id": request_id, - "messages": messages, - "stream": stream, - "scope": scope, - "location": location, - "recent_days": recent_days, - } - ) - return self._post( - "/tools", - body=maybe_transform(body, tools_web_search_params.WebSearchParams), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=WebSearch, - stream=stream or False, - stream_cls=StreamResponse[WebSearchChunk], - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py deleted file mode 100644 index 6b0f99ed09..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .videos import ( - Videos, -) - -__all__ = [ - "Videos", -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py deleted file mode 100644 index f1f1c08036..0000000000 --- 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/api_resource/videos/videos.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING, Optional - -import httpx - -from ...core import ( - NOT_GIVEN, - BaseAPI, - Body, - Headers, - NotGiven, - deepcopy_minimal, - make_request_options, - maybe_transform, -) -from ...types.sensitive_word_check import SensitiveWordCheckRequest -from ...types.video import VideoObject, video_create_params - -if TYPE_CHECKING: - from ..._client import ZhipuAI - -__all__ = ["Videos"] - - -class Videos(BaseAPI): - def __init__(self, client: ZhipuAI) -> None: - super().__init__(client) - - def generations( - self, - model: str, - *, - prompt: str = None, - image_url: str = None, - sensitive_word_check: Optional[SensitiveWordCheckRequest] | NotGiven = NOT_GIVEN, - request_id: str = None, - user_id: str = None, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VideoObject: - if not model and not prompt: - raise ValueError("At least one of `model` and `prompt` must be provided.") - body = deepcopy_minimal( - { - "model": model, - "prompt": prompt, - "image_url": image_url, - "sensitive_word_check": sensitive_word_check, - "request_id": request_id, - "user_id": user_id, - } - ) - return self._post( - "/videos/generations", - body=maybe_transform(body, video_create_params.VideoCreateParams), - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=VideoObject, - ) - - def retrieve_videos_result( - self, - id: str, - *, - extra_headers: Headers | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VideoObject: - if not id: - raise ValueError("Expected a non-empty value for `id`") - - return self._get( - f"/async-result/{id}", - options=make_request_options(extra_headers=extra_headers, extra_body=extra_body, timeout=timeout), - cast_type=VideoObject, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py deleted file mode 100644 index 3d6466d279..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -from ._base_api import BaseAPI -from ._base_compat import ( - PYDANTIC_V2, - ConfigDict, - GenericModel, - cached_property, - field_get_default, - get_args, - get_model_config, - get_model_fields, - get_origin, - is_literal_type, - is_union, - parse_obj, -) -from ._base_models import BaseModel, construct_type -from ._base_type import ( - NOT_GIVEN, - Body, - FileTypes, - Headers, - IncEx, - ModelT, - NotGiven, - Query, -) -from ._constants import ( - ZHIPUAI_DEFAULT_LIMITS, - ZHIPUAI_DEFAULT_MAX_RETRIES, - ZHIPUAI_DEFAULT_TIMEOUT, -) -from ._errors import ( - APIAuthenticationError, - APIConnectionError, - APIInternalError, - APIReachLimitError, - APIRequestFailedError, - APIResponseError, - APIResponseValidationError, - APIServerFlowExceedError, - APIStatusError, - APITimeoutError, - ZhipuAIError, -) -from ._files import is_file_content -from ._http_client import HttpClient, make_request_options -from ._sse_client import StreamResponse -from ._utils import ( - deepcopy_minimal, - drop_prefix_image_data, - extract_files, - is_given, - is_list, - is_mapping, - maybe_transform, - parse_date, - parse_datetime, -) - -__all__
= [ - "BaseModel", - "construct_type", - "BaseAPI", - "NOT_GIVEN", - "Headers", - "NotGiven", - "Body", - "IncEx", - "ModelT", - "Query", - "FileTypes", - "PYDANTIC_V2", - "ConfigDict", - "GenericModel", - "get_args", - "is_union", - "parse_obj", - "get_origin", - "is_literal_type", - "get_model_config", - "get_model_fields", - "field_get_default", - "is_file_content", - "ZhipuAIError", - "APIStatusError", - "APIRequestFailedError", - "APIAuthenticationError", - "APIReachLimitError", - "APIInternalError", - "APIServerFlowExceedError", - "APIResponseError", - "APIResponseValidationError", - "APITimeoutError", - "make_request_options", - "HttpClient", - "ZHIPUAI_DEFAULT_TIMEOUT", - "ZHIPUAI_DEFAULT_MAX_RETRIES", - "ZHIPUAI_DEFAULT_LIMITS", - "is_list", - "is_mapping", - "parse_date", - "parse_datetime", - "is_given", - "maybe_transform", - "deepcopy_minimal", - "extract_files", - "StreamResponse", -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py deleted file mode 100644 index 3592ea6bac..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_api.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from .._client import ZhipuAI - - -class BaseAPI: - _client: ZhipuAI - - def __init__(self, client: ZhipuAI) -> None: - self._client = client - self._delete = client.delete - self._get = client.get - self._post = client.post - self._put = client.put - self._patch = client.patch - self._get_api_list = client.get_api_list diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py deleted file mode 100644 index 92a5d683be..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_compat.py +++ /dev/null @@ -1,209 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable -from datetime import date, datetime -from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, overload - -import pydantic -from pydantic.fields import FieldInfo -from typing_extensions import Self - -from ._base_type import StrBytesIntFloat - -_T = TypeVar("_T") -_ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) - -# --------------- Pydantic v2 compatibility --------------- - -# Pyright incorrectly reports some of our functions as overriding a method when they don't -# pyright: reportIncompatibleMethodOverride=false - -PYDANTIC_V2 = pydantic.VERSION.startswith("2.") - -# v1 re-exports -if TYPE_CHECKING: - - def parse_date(value: date | StrBytesIntFloat) -> date: ... - - def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: ... - - def get_args(t: type[Any]) -> tuple[Any, ...]: ... - - def is_union(tp: type[Any] | None) -> bool: ... - - def get_origin(t: type[Any]) -> type[Any] | None: ... - - def is_literal_type(type_: type[Any]) -> bool: ... - - def is_typeddict(type_: type[Any]) -> bool: ... 
- -else: - if PYDANTIC_V2: - from pydantic.v1.typing import ( # noqa: I001 - get_args as get_args, # noqa: PLC0414 - is_union as is_union, # noqa: PLC0414 - get_origin as get_origin, # noqa: PLC0414 - is_typeddict as is_typeddict, # noqa: PLC0414 - is_literal_type as is_literal_type, # noqa: PLC0414 - ) - from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # noqa: PLC0414 - else: - from pydantic.typing import ( # noqa: I001 - get_args as get_args, # noqa: PLC0414 - is_union as is_union, # noqa: PLC0414 - get_origin as get_origin, # noqa: PLC0414 - is_typeddict as is_typeddict, # noqa: PLC0414 - is_literal_type as is_literal_type, # noqa: PLC0414 - ) - from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # noqa: PLC0414 - - -# refactored config -if TYPE_CHECKING: - from pydantic import ConfigDict -else: - if PYDANTIC_V2: - from pydantic import ConfigDict - else: - # TODO: provide an error message here? - ConfigDict = None - - -# renamed methods / properties -def parse_obj(model: type[_ModelT], value: object) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(value) - else: - # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - return cast(_ModelT, model.parse_obj(value)) - - -def field_is_required(field: FieldInfo) -> bool: - if PYDANTIC_V2: - return field.is_required() - return field.required # type: ignore - - -def field_get_default(field: FieldInfo) -> Any: - value = field.get_default() - if PYDANTIC_V2: - from pydantic_core import PydanticUndefined - - if value == PydanticUndefined: - return None - return value - return value - - -def field_outer_type(field: FieldInfo) -> Any: - if PYDANTIC_V2: - return field.annotation - return field.outer_type_ # type: ignore - - -def get_model_config(model: type[pydantic.BaseModel]) -> Any: - if PYDANTIC_V2: - return model.model_config - return model.__config__ # type: ignore - - -def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: - if PYDANTIC_V2: - return model.model_fields - return model.__fields__ # type: ignore - - -def model_copy(model: _ModelT) -> _ModelT: - if PYDANTIC_V2: - return model.model_copy() - return model.copy() # type: ignore - - -def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: - if PYDANTIC_V2: - return model.model_dump_json(indent=indent) - return model.json(indent=indent) # type: ignore - - -def model_dump( - model: pydantic.BaseModel, - *, - exclude_unset: bool = False, - exclude_defaults: bool = False, -) -> dict[str, Any]: - if PYDANTIC_V2: - return model.model_dump( - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - ) - return cast( - "dict[str, Any]", - model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - ), - ) - - -def model_parse(model: type[_ModelT], data: Any) -> _ModelT: - if PYDANTIC_V2: - return model.model_validate(data) - return model.parse_obj(data) # pyright: ignore[reportDeprecated] - - -# generic models -if TYPE_CHECKING: - - class GenericModel(pydantic.BaseModel): ... - -else: - if PYDANTIC_V2: - # there no longer needs to be a distinction in v2 but - # we still have to create our own subclass to avoid - # inconsistent MRO ordering errors - class GenericModel(pydantic.BaseModel): ... - - else: - import pydantic.generics - - class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... 
- - -# cached properties -if TYPE_CHECKING: - cached_property = property - - # we define a separate type (copied from typeshed) - # that represents that `cached_property` is `set`able - # at runtime, which differs from `@property`. - # - # this is a separate type as editors likely special case - # `@property` and we don't want to cause issues just to have - # more helpful internal types. - - class typed_cached_property(Generic[_T]): # noqa: N801 - func: Callable[[Any], _T] - attrname: str | None - - def __init__(self, func: Callable[[Any], _T]) -> None: ... - - @overload - def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... - - @overload - def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... - - def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: - raise NotImplementedError() - - def __set_name__(self, owner: type[Any], name: str) -> None: ... - - # __set__ is not defined at runtime, but @cached_property is designed to be settable - def __set__(self, instance: object, value: _T) -> None: ... -else: - try: - from functools import cached_property - except ImportError: - from cached_property import cached_property - - typed_cached_property = cached_property diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py deleted file mode 100644 index 6d8ba700b7..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_models.py +++ /dev/null @@ -1,670 +0,0 @@ -from __future__ import annotations - -import inspect -import os -from collections.abc import Callable -from datetime import date, datetime -from typing import TYPE_CHECKING, Any, ClassVar, Generic, Literal, TypeGuard, TypeVar, cast - -import pydantic -import pydantic.generics -from pydantic.fields import FieldInfo -from typing_extensions import ( - ParamSpec, - Protocol, - override, - runtime_checkable, -) - -from ._base_compat import ( - PYDANTIC_V2, - ConfigDict, - field_get_default, - get_args, - get_model_config, - get_model_fields, - get_origin, - is_literal_type, - is_union, - parse_obj, -) -from ._base_compat import ( - GenericModel as BaseGenericModel, -) -from ._base_type import ( - IncEx, - ModelT, -) -from ._utils import ( - PropertyInfo, - coerce_boolean, - extract_type_arg, - is_annotated_type, - is_list, - is_mapping, - parse_date, - parse_datetime, - strip_annotated_type, -) - -if TYPE_CHECKING: - from pydantic_core.core_schema import LiteralSchema, ModelField, ModelFieldsSchema - -__all__ = ["BaseModel", "GenericModel"] -_BaseModelT = TypeVar("_BaseModelT", bound="BaseModel") - -_T = TypeVar("_T") -P = ParamSpec("P") - - -@runtime_checkable -class _ConfigProtocol(Protocol): - allow_population_by_field_name: bool - - -class BaseModel(pydantic.BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict( - extra="allow", defer_build=coerce_boolean(os.environ.get("DEFER_PYDANTIC_BUILD", "true")) - ) - else: - - @property - @override - def model_fields_set(self) -> set[str]: - # a forwards-compat shim for pydantic v2 - return self.__fields_set__ # type: ignore - - class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] - extra: Any = pydantic.Extra.allow # type: ignore - - def to_dict( - self, - *, - mode: Literal["json", "python"] = "python", - use_api_names: bool = True, - exclude_unset: bool = True, - exclude_defaults: bool = False, - exclude_none: bool = False, - 
warnings: bool = True, - ) -> dict[str, object]: - """Recursively generate a dictionary representation of the model, optionally specifying which fields to include or exclude. - - By default, fields that were not set by the API will not be included, - and keys will match the API response, *not* the property names from the model. - - For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, - the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). - - Args: - mode: - If mode is 'json', the dictionary will only contain JSON serializable types. e.g. `datetime` will be turned into a string, `"2024-3-22T18:11:19.117000Z"`. - If mode is 'python', the dictionary may contain any Python objects. e.g. `datetime(2024, 3, 22)` - - use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. - exclude_unset: Whether to exclude fields that have not been explicitly set. - exclude_defaults: Whether to exclude fields that are set to their default value from the output. - exclude_none: Whether to exclude fields that have a value of `None` from the output. - warnings: Whether to log warnings when invalid fields are encountered. This is only supported in Pydantic v2. - """ # noqa: E501 - return self.model_dump( - mode=mode, - by_alias=use_api_names, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - warnings=warnings, - ) - - def to_json( - self, - *, - indent: int | None = 2, - use_api_names: bool = True, - exclude_unset: bool = True, - exclude_defaults: bool = False, - exclude_none: bool = False, - warnings: bool = True, - ) -> str: - """Generates a JSON string representing this model as it would be received from or sent to the API (but with indentation). - - By default, fields that were not set by the API will not be included, - and keys will match the API response, *not* the property names from the model. - - For example, if the API responds with `"fooBar": true` but we've defined a `foo_bar: bool` property, - the output will use the `"fooBar"` key (unless `use_api_names=False` is passed). - - Args: - indent: Indentation to use in the JSON output. If `None` is passed, the output will be compact. Defaults to `2` - use_api_names: Whether to use the key that the API responded with or the property name. Defaults to `True`. - exclude_unset: Whether to exclude fields that have not been explicitly set. - exclude_defaults: Whether to exclude fields that have the default value. - exclude_none: Whether to exclude fields that have a value of `None`. - warnings: Whether to show any warnings that occurred during serialization. This is only supported in Pydantic v2. - """ # noqa: E501 - return self.model_dump_json( - indent=indent, - by_alias=use_api_names, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - warnings=warnings, - ) - - @override - def __str__(self) -> str: - # mypy complains about an invalid self arg - return f'{self.__repr_name__()}({self.__repr_str__(", ")})' # type: ignore[misc] - - # Override the 'construct' method in a way that supports recursive parsing without validation. - # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. 
- @classmethod - @override - def construct( - cls: type[ModelT], - _fields_set: set[str] | None = None, - **values: object, - ) -> ModelT: - m = cls.__new__(cls) - fields_values: dict[str, object] = {} - - config = get_model_config(cls) - populate_by_name = ( - config.allow_population_by_field_name - if isinstance(config, _ConfigProtocol) - else config.get("populate_by_name") - ) - - if _fields_set is None: - _fields_set = set() - - model_fields = get_model_fields(cls) - for name, field in model_fields.items(): - key = field.alias - if key is None or (key not in values and populate_by_name): - key = name - - if key in values: - fields_values[name] = _construct_field(value=values[key], field=field, key=key) - _fields_set.add(name) - else: - fields_values[name] = field_get_default(field) - - _extra = {} - for key, value in values.items(): - if key not in model_fields: - if PYDANTIC_V2: - _extra[key] = value - else: - _fields_set.add(key) - fields_values[key] = value - - object.__setattr__(m, "__dict__", fields_values) # noqa: PLC2801 - - if PYDANTIC_V2: - # these properties are copied from Pydantic's `model_construct()` method - object.__setattr__(m, "__pydantic_private__", None) # noqa: PLC2801 - object.__setattr__(m, "__pydantic_extra__", _extra) # noqa: PLC2801 - object.__setattr__(m, "__pydantic_fields_set__", _fields_set) # noqa: PLC2801 - else: - # init_private_attributes() does not exist in v2 - m._init_private_attributes() # type: ignore - - # copied from Pydantic v1's `construct()` method - object.__setattr__(m, "__fields_set__", _fields_set) # noqa: PLC2801 - - return m - - if not TYPE_CHECKING: - # type checkers incorrectly complain about this assignment - # because the type signatures are technically different - # although not in practice - model_construct = construct - - if not PYDANTIC_V2: - # we define aliases for some of the new pydantic v2 methods so - # that we can just document these methods without having to specify - # a specific pydantic version as some users may not know which - # pydantic version they are currently using - - @override - def model_dump( - self, - *, - mode: Literal["json", "python"] | str = "python", - include: IncEx = None, - exclude: IncEx = None, - by_alias: bool = False, - exclude_unset: bool = False, - exclude_defaults: bool = False, - exclude_none: bool = False, - round_trip: bool = False, - warnings: bool | Literal["none", "warn", "error"] = True, - context: dict[str, Any] | None = None, - serialize_as_any: bool = False, - ) -> dict[str, Any]: - """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump - - Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. - - Args: - mode: The mode in which `to_python` should run. - If mode is 'json', the dictionary will only contain JSON serializable types. - If mode is 'python', the dictionary may contain any Python objects. - include: A list of fields to include in the output. - exclude: A list of fields to exclude from the output. - by_alias: Whether to use the field's alias in the dictionary key if defined. - exclude_unset: Whether to exclude fields that are unset or None from the output. - exclude_defaults: Whether to exclude fields that are set to their default value from the output. - exclude_none: Whether to exclude fields that have a value of `None` from the output. - round_trip: Whether to enable serialization and deserialization round-trip support. 
- warnings: Whether to log warnings when invalid fields are encountered. - - Returns: - A dictionary representation of the model. - """ - if mode != "python": - raise ValueError("mode is only supported in Pydantic v2") - if round_trip != False: - raise ValueError("round_trip is only supported in Pydantic v2") - if warnings != True: - raise ValueError("warnings is only supported in Pydantic v2") - if context is not None: - raise ValueError("context is only supported in Pydantic v2") - if serialize_as_any != False: - raise ValueError("serialize_as_any is only supported in Pydantic v2") - return super().dict( # pyright: ignore[reportDeprecated] - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - ) - - @override - def model_dump_json( - self, - *, - indent: int | None = None, - include: IncEx = None, - exclude: IncEx = None, - by_alias: bool = False, - exclude_unset: bool = False, - exclude_defaults: bool = False, - exclude_none: bool = False, - round_trip: bool = False, - warnings: bool | Literal["none", "warn", "error"] = True, - context: dict[str, Any] | None = None, - serialize_as_any: bool = False, - ) -> str: - """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json - - Generates a JSON representation of the model using Pydantic's `to_json` method. - - Args: - indent: Indentation to use in the JSON output. If None is passed, the output will be compact. - include: Field(s) to include in the JSON output. Can take either a string or set of strings. - exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings. - by_alias: Whether to serialize using field aliases. - exclude_unset: Whether to exclude fields that have not been explicitly set. - exclude_defaults: Whether to exclude fields that have the default value. - exclude_none: Whether to exclude fields that have a value of `None`. - round_trip: Whether to use serialization/deserialization between JSON and class instance. - warnings: Whether to show any warnings that occurred during serialization. - - Returns: - A JSON string representation of the model. 
- """ - if round_trip != False: - raise ValueError("round_trip is only supported in Pydantic v2") - if warnings != True: - raise ValueError("warnings is only supported in Pydantic v2") - if context is not None: - raise ValueError("context is only supported in Pydantic v2") - if serialize_as_any != False: - raise ValueError("serialize_as_any is only supported in Pydantic v2") - return super().json( # type: ignore[reportDeprecated] - indent=indent, - include=include, - exclude=exclude, - by_alias=by_alias, - exclude_unset=exclude_unset, - exclude_defaults=exclude_defaults, - exclude_none=exclude_none, - ) - - -def _construct_field(value: object, field: FieldInfo, key: str) -> object: - if value is None: - return field_get_default(field) - - if PYDANTIC_V2: - type_ = field.annotation - else: - type_ = cast(type, field.outer_type_) # type: ignore - - if type_ is None: - raise RuntimeError(f"Unexpected field type is None for {key}") - - return construct_type(value=value, type_=type_) - - -def is_basemodel(type_: type) -> bool: - """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" - if is_union(type_): - return any(is_basemodel(variant) for variant in get_args(type_)) - - return is_basemodel_type(type_) - - -def is_basemodel_type(type_: type) -> TypeGuard[type[BaseModel] | type[GenericModel]]: - origin = get_origin(type_) or type_ - return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) - - -def build( - base_model_cls: Callable[P, _BaseModelT], - *args: P.args, - **kwargs: P.kwargs, -) -> _BaseModelT: - """Construct a BaseModel class without validation. - - This is useful for cases where you need to instantiate a `BaseModel` - from an API response as this provides type-safe params which isn't supported - by helpers like `construct_type()`. - - ```py - build(MyModel, my_field_a="foo", my_field_b=123) - ``` - """ - if args: - raise TypeError( - "Received positional arguments which are not supported; Keyword arguments must be used instead", - ) - - return cast(_BaseModelT, construct_type(type_=base_model_cls, value=kwargs)) - - -def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: - """Loose coercion to the expected type with construction of nested values. - - Note: the returned value from this function is not guaranteed to match the - given type. - """ - return cast(_T, construct_type(value=value, type_=type_)) - - -def construct_type(*, value: object, type_: type) -> object: - """Loose coercion to the expected type with construction of nested values. - - If the given value does not match the expected type then it is returned as-is. - """ - # we allow `object` as the input type because otherwise, passing things like - # `Literal['value']` will be reported as a type error by type checkers - type_ = cast("type[object]", type_) - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] - type_ = extract_type_arg(type_, 0) - else: - meta = () - # we need to use the origin class for any types that are subscripted generics - # e.g. Dict[str, object] - origin = get_origin(type_) or type_ - args = get_args(type_) - - if is_union(origin): - try: - return validate_type(type_=cast("type[object]", type_), value=value) - except Exception: - pass - - # if the type is a discriminated union then we want to construct the right variant - # in the union, even if the data doesn't match exactly, otherwise we'd break code - # that relies on the constructed class types, e.g. 
- # - # class FooType: - # kind: Literal['foo'] - # value: str - # - # class BarType: - # kind: Literal['bar'] - # value: int - # - # without this block, if the data we get is something like `{'kind': 'bar', 'value': 'foo'}` then - # we'd end up constructing `FooType` when it should be `BarType`. - discriminator = _build_discriminated_union_meta(union=type_, meta_annotations=meta) - if discriminator and is_mapping(value): - variant_value = value.get(discriminator.field_alias_from or discriminator.field_name) - if variant_value and isinstance(variant_value, str): - variant_type = discriminator.mapping.get(variant_value) - if variant_type: - return construct_type(type_=variant_type, value=value) - - # if the data is not valid, use the first variant that doesn't fail while deserializing - for variant in args: - try: - return construct_type(value=value, type_=variant) - except Exception: - continue - - raise RuntimeError(f"Could not convert data into a valid instance of {type_}") - if origin == dict: - if not is_mapping(value): - return value - - _, items_type = get_args(type_) # Dict[_, items_type] - return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} - - if not is_literal_type(type_) and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)): - if is_list(value): - return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] - - if is_mapping(value): - if issubclass(type_, BaseModel): - return type_.construct(**value) # type: ignore[arg-type] - - return cast(Any, type_).construct(**value) - - if origin == list: - if not is_list(value): - return value - - inner_type = args[0] # List[inner_type] - return [construct_type(value=entry, type_=inner_type) for entry in value] - - if origin == float: - if isinstance(value, int): - coerced = float(value) - if coerced != value: - return value - return coerced - - return value - - if type_ == datetime: - try: - return parse_datetime(value) # type: ignore - except Exception: - return value - - if type_ == date: - try: - return parse_date(value) # type: ignore - except Exception: - return value - - return value - - -@runtime_checkable -class CachedDiscriminatorType(Protocol): - __discriminator__: DiscriminatorDetails - - -class DiscriminatorDetails: - field_name: str - """The name of the discriminator field in the variant class, e.g. - - ```py - class Foo(BaseModel): - type: Literal['foo'] - ``` - - Will result in field_name='type' - """ - - field_alias_from: str | None - """The name of the discriminator field in the API response, e.g. - - ```py - class Foo(BaseModel): - type: Literal['foo'] = Field(alias='type_from_api') - ``` - - Will result in field_alias_from='type_from_api' - """ - - mapping: dict[str, type] - """Mapping of discriminator value to variant type, e.g. 
- - {'foo': FooVariant, 'bar': BarVariant} - """ - - def __init__( - self, - *, - mapping: dict[str, type], - discriminator_field: str, - discriminator_alias: str | None, - ) -> None: - self.mapping = mapping - self.field_name = discriminator_field - self.field_alias_from = discriminator_alias - - -def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None: - if isinstance(union, CachedDiscriminatorType): - return union.__discriminator__ - - discriminator_field_name: str | None = None - - for annotation in meta_annotations: - if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None: - discriminator_field_name = annotation.discriminator - break - - if not discriminator_field_name: - return None - - mapping: dict[str, type] = {} - discriminator_alias: str | None = None - - for variant in get_args(union): - variant = strip_annotated_type(variant) - if is_basemodel_type(variant): - if PYDANTIC_V2: - field = _extract_field_schema_pv2(variant, discriminator_field_name) - if not field: - continue - - # Note: if one variant defines an alias then they all should - discriminator_alias = field.get("serialization_alias") - - field_schema = field["schema"] - - if field_schema["type"] == "literal": - for entry in cast("LiteralSchema", field_schema)["expected"]: - if isinstance(entry, str): - mapping[entry] = variant - else: - field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] - if not field_info: - continue - - # Note: if one variant defines an alias then they all should - discriminator_alias = field_info.alias - - if field_info.annotation and is_literal_type(field_info.annotation): - for entry in get_args(field_info.annotation): - if isinstance(entry, str): - mapping[entry] = variant - - if not mapping: - return None - - details = DiscriminatorDetails( - mapping=mapping, - discriminator_field=discriminator_field_name, - discriminator_alias=discriminator_alias, - ) - cast(CachedDiscriminatorType, union).__discriminator__ = details - return details - - -def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None: - schema = model.__pydantic_core_schema__ - if schema["type"] != "model": - return None - - fields_schema = schema["schema"] - if fields_schema["type"] != "model-fields": - return None - - fields_schema = cast("ModelFieldsSchema", fields_schema) - - field = fields_schema["fields"].get(field_name) - if not field: - return None - - return cast("ModelField", field) # pyright: ignore[reportUnnecessaryCast] - - -def validate_type(*, type_: type[_T], value: object) -> _T: - """Strict validation that the given value matches the expected type""" - if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): - return cast(_T, parse_obj(type_, value)) - - return cast(_T, _validate_non_model_type(type_=type_, value=value)) - - -# Subclassing here confuses type checkers, so we treat this class as non-inheriting. -if TYPE_CHECKING: - GenericModel = BaseModel -else: - - class GenericModel(BaseGenericModel, BaseModel): - pass - - -if PYDANTIC_V2: - from pydantic import TypeAdapter - - def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: - return TypeAdapter(type_).validate_python(value) - -elif not TYPE_CHECKING: - - class TypeAdapter(Generic[_T]): - """Used as a placeholder to easily convert runtime types to a Pydantic format - to provide validation. 
- - For example: - ```py - validated = RootModel[int](__root__="5").__root__ - # validated: 5 - ``` - """ - - def __init__(self, type_: type[_T]): - self.type_ = type_ - - def validate_python(self, value: Any) -> _T: - if not isinstance(value, self.type_): - raise ValueError(f"Invalid type: {value} is not of type {self.type_}") - return value - - def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: - return TypeAdapter(type_).validate_python(value) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py deleted file mode 100644 index ea1d3f09dc..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py +++ /dev/null @@ -1,170 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable, Mapping, Sequence -from os import PathLike -from typing import ( - IO, - TYPE_CHECKING, - Any, - Literal, - Optional, - TypeAlias, - TypeVar, - Union, -) - -import pydantic -from httpx import Response -from typing_extensions import Protocol, TypedDict, override, runtime_checkable - -Query = Mapping[str, object] -Body = object -AnyMapping = Mapping[str, object] -PrimitiveData = Union[str, int, float, bool, None] -Data = Union[PrimitiveData, list[Any], tuple[Any], "Mapping[str, Any]"] -ModelT = TypeVar("ModelT", bound=pydantic.BaseModel) -_T = TypeVar("_T") - -if TYPE_CHECKING: - NoneType: type[None] -else: - NoneType = type(None) - - -# Sentinel class used until PEP 0661 is accepted -class NotGiven: - """ - A sentinel singleton class used to distinguish omitted keyword arguments - from those passed in with the value None (which may have different behavior). - - For example: - - ```py - def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... - - get(timeout=1) # 1s timeout - get(timeout=None) # No timeout - get() # Default timeout behavior, which may not be statically known at the method definition. - ``` - """ - - def __bool__(self) -> Literal[False]: - return False - - @override - def __repr__(self) -> str: - return "NOT_GIVEN" - - -NotGivenOr = Union[_T, NotGiven] -NOT_GIVEN = NotGiven() - - -class Omit: - """In certain situations you need to be able to represent a case where a default value has - to be explicitly removed and `None` is not an appropriate substitute, for example: - - ```py - # as the default `Content-Type` header is `application/json` that will be sent - client.post('/upload/files', files={'file': b'my raw file content'}) - - # you can't explicitly override the header as it has to be dynamically generated - # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983' - client.post(..., headers={'Content-Type': 'multipart/form-data'}) - - # instead you can remove the default `application/json` header by passing Omit - client.post(..., headers={'Content-Type': Omit()}) - ``` - """ - - def __bool__(self) -> Literal[False]: - return False - - -@runtime_checkable -class ModelBuilderProtocol(Protocol): - @classmethod - def build( - cls: type[_T], - *, - response: Response, - data: object, - ) -> _T: ... - - -Headers = Mapping[str, Union[str, Omit]] - - -class HeadersLikeProtocol(Protocol): - def get(self, __key: str) -> str | None: ... 
- - -HeadersLike = Union[Headers, HeadersLikeProtocol] - -ResponseT = TypeVar( - "ResponseT", - bound="Union[str, None, BaseModel, list[Any], dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", # noqa: E501 -) - -StrBytesIntFloat = Union[str, bytes, int, float] - -# Note: copied from Pydantic -# https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 -IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None" - -PostParser = Callable[[Any], Any] - - -@runtime_checkable -class InheritsGeneric(Protocol): - """Represents a type that has inherited from `Generic` - - The `__orig_bases__` property can be used to determine the resolved - type variable for a given base class. - """ - - __orig_bases__: tuple[_GenericAlias] - - -class _GenericAlias(Protocol): - __origin__: type[object] - - -class HttpxSendArgs(TypedDict, total=False): - auth: httpx.Auth - - -# for user input files -if TYPE_CHECKING: - Base64FileInput = Union[IO[bytes], PathLike[str]] - FileContent = Union[IO[bytes], bytes, PathLike[str]] -else: - Base64FileInput = Union[IO[bytes], PathLike] - FileContent = Union[IO[bytes], bytes, PathLike] - -FileTypes = Union[ - # file (or bytes) - FileContent, - # (filename, file (or bytes)) - tuple[Optional[str], FileContent], - # (filename, file (or bytes), content_type) - tuple[Optional[str], FileContent, Optional[str]], - # (filename, file (or bytes), content_type, headers) - tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], -] -RequestFiles = Union[Mapping[str, FileTypes], Sequence[tuple[str, FileTypes]]] - -# duplicate of the above but without our custom file support -HttpxFileContent = Union[bytes, IO[bytes]] -HttpxFileTypes = Union[ - # file (or bytes) - HttpxFileContent, - # (filename, file (or bytes)) - tuple[Optional[str], HttpxFileContent], - # (filename, file (or bytes), content_type) - tuple[Optional[str], HttpxFileContent, Optional[str]], - # (filename, file (or bytes), content_type, headers) - tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]], -] - -HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[tuple[str, HttpxFileTypes]]] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py deleted file mode 100644 index 8e43bdebec..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_constants.py +++ /dev/null @@ -1,12 +0,0 @@ -import httpx - -RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" -# `Timeout` controls the `connect` and `read` timeouts for requests; defaults to `timeout=300.0, connect=8.0` -ZHIPUAI_DEFAULT_TIMEOUT = httpx.Timeout(timeout=300.0, connect=8.0) -# The `retry` parameter controls the number of retries; defaults to 3 -ZHIPUAI_DEFAULT_MAX_RETRIES = 3 -# `Limits` controls the maximum number of connections and keep-alive connections; defaults to `max_connections=50, max_keepalive_connections=10` -ZHIPUAI_DEFAULT_LIMITS = httpx.Limits(max_connections=50, max_keepalive_connections=10) - -INITIAL_RETRY_DELAY = 0.5 -MAX_RETRY_DELAY = 8.0 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py deleted file mode 100644 index e2c9d24c6c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_errors.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import annotations - -import httpx - -__all__ = [ - "ZhipuAIError", - "APIStatusError", - "APIRequestFailedError", -
"APIAuthenticationError", - "APIReachLimitError", - "APIInternalError", - "APIServerFlowExceedError", - "APIResponseError", - "APIResponseValidationError", - "APITimeoutError", - "APIConnectionError", -] - - -class ZhipuAIError(Exception): - def __init__( - self, - message: str, - ) -> None: - super().__init__(message) - - -class APIStatusError(ZhipuAIError): - response: httpx.Response - status_code: int - - def __init__(self, message: str, *, response: httpx.Response) -> None: - super().__init__(message) - self.response = response - self.status_code = response.status_code - - -class APIRequestFailedError(APIStatusError): ... - - -class APIAuthenticationError(APIStatusError): ... - - -class APIReachLimitError(APIStatusError): ... - - -class APIInternalError(APIStatusError): ... - - -class APIServerFlowExceedError(APIStatusError): ... - - -class APIResponseError(ZhipuAIError): - message: str - request: httpx.Request - json_data: object - - def __init__(self, message: str, request: httpx.Request, json_data: object): - self.message = message - self.request = request - self.json_data = json_data - super().__init__(message) - - -class APIResponseValidationError(APIResponseError): - status_code: int - response: httpx.Response - - def __init__(self, response: httpx.Response, json_data: object | None, *, message: str | None = None) -> None: - super().__init__( - message=message or "Data returned by API invalid for expected schema.", - request=response.request, - json_data=json_data, - ) - self.response = response - self.status_code = response.status_code - - -class APIConnectionError(APIResponseError): - def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: - super().__init__(message, request, json_data=None) - - -class APITimeoutError(APIConnectionError): - def __init__(self, request: httpx.Request) -> None: - super().__init__(message="Request timed out.", request=request) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py deleted file mode 100644 index f9d2e14d9e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_files.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import annotations - -import io -import os -import pathlib -from typing import TypeGuard, overload - -from ._base_type import ( - Base64FileInput, - FileContent, - FileTypes, - HttpxFileContent, - HttpxFileTypes, - HttpxRequestFiles, - RequestFiles, -) -from ._utils import is_mapping_t, is_sequence_t, is_tuple_t - - -def is_base64_file_input(obj: object) -> TypeGuard[Base64FileInput]: - return isinstance(obj, io.IOBase | os.PathLike) - - -def is_file_content(obj: object) -> TypeGuard[FileContent]: - return isinstance(obj, bytes | tuple | io.IOBase | os.PathLike) - - -def assert_is_file_content(obj: object, *, key: str | None = None) -> None: - if not is_file_content(obj): - prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" - raise RuntimeError( - f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/main#file-uploads" - ) from None - - -@overload -def to_httpx_files(files: None) -> None: ... - - -@overload -def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... 
- - -def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: - if files is None: - return None - - if is_mapping_t(files): - files = {key: _transform_file(file) for key, file in files.items()} - elif is_sequence_t(files): - files = [(key, _transform_file(file)) for key, file in files] - else: - raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence") - - return files - - -def _transform_file(file: FileTypes) -> HttpxFileTypes: - if is_file_content(file): - if isinstance(file, os.PathLike): - path = pathlib.Path(file) - return (path.name, path.read_bytes()) - - return file - - if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) - - raise TypeError("Expected file types input to be a FileContent type or to be a tuple") - - -def _read_file_content(file: FileContent) -> HttpxFileContent: - if isinstance(file, os.PathLike): - return pathlib.Path(file).read_bytes() - return file diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py deleted file mode 100644 index ffdafb85d5..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_http_client.py +++ /dev/null @@ -1,910 +0,0 @@ -from __future__ import annotations - -import inspect -import logging -import time -import warnings -from collections.abc import Iterable, Iterator, Mapping -from itertools import starmap -from random import random -from typing import TYPE_CHECKING, Any, Generic, Literal, Optional, TypeVar, Union, cast, overload - -import httpx -import pydantic -from httpx import URL, Timeout - -from . import _errors, get_origin -from ._base_compat import model_copy -from ._base_models import GenericModel, construct_type, validate_type -from ._base_type import ( - NOT_GIVEN, - AnyMapping, - Body, - Data, - Headers, - HttpxSendArgs, - ModelBuilderProtocol, - NotGiven, - Omit, - PostParser, - Query, - RequestFiles, - ResponseT, -) -from ._constants import ( - INITIAL_RETRY_DELAY, - MAX_RETRY_DELAY, - RAW_RESPONSE_HEADER, - ZHIPUAI_DEFAULT_LIMITS, - ZHIPUAI_DEFAULT_MAX_RETRIES, - ZHIPUAI_DEFAULT_TIMEOUT, -) -from ._errors import APIConnectionError, APIResponseValidationError, APIStatusError, APITimeoutError -from ._files import to_httpx_files -from ._legacy_response import LegacyAPIResponse -from ._request_opt import FinalRequestOptions, UserRequestInput -from ._response import APIResponse, BaseAPIResponse, extract_response_type -from ._sse_client import StreamResponse -from ._utils import flatten, is_given, is_mapping - -log: logging.Logger = logging.getLogger(__name__) - -# TODO: make base page type vars covariant -SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") -# AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]") - -_T = TypeVar("_T") -_T_co = TypeVar("_T_co", covariant=True) - -if TYPE_CHECKING: - from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT -else: - try: - from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT - except ImportError: - # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366 - HTTPX_DEFAULT_TIMEOUT = Timeout(5.0) - - -headers = { - "Accept": "application/json", - "Content-Type": "application/json; charset=UTF-8", -} - - -class PageInfo: - """Stores the necessary information to build the request to retrieve the next page. - - Either `url` or `params` must be set.
- """ - - url: URL | NotGiven - params: Query | NotGiven - - @overload - def __init__( - self, - *, - url: URL, - ) -> None: ... - - @overload - def __init__( - self, - *, - params: Query, - ) -> None: ... - - def __init__( - self, - *, - url: URL | NotGiven = NOT_GIVEN, - params: Query | NotGiven = NOT_GIVEN, - ) -> None: - self.url = url - self.params = params - - -class BasePage(GenericModel, Generic[_T]): - """ - Defines the core interface for pagination. - - Type Args: - ModelT: The pydantic model that represents an item in the response. - - Methods: - has_next_page(): Check if there is another page available - next_page_info(): Get the necessary information to make a request for the next page - """ - - _options: FinalRequestOptions = pydantic.PrivateAttr() - _model: type[_T] = pydantic.PrivateAttr() - - def has_next_page(self) -> bool: - items = self._get_page_items() - if not items: - return False - return self.next_page_info() is not None - - def next_page_info(self) -> Optional[PageInfo]: ... - - def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] - ... - - def _params_from_url(self, url: URL) -> httpx.QueryParams: - # TODO: do we have to preprocess params here? - return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params) - - def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: - options = model_copy(self._options) - options._strip_raw_response_header() - - if not isinstance(info.params, NotGiven): - options.params = {**options.params, **info.params} - return options - - if not isinstance(info.url, NotGiven): - params = self._params_from_url(info.url) - url = info.url.copy_with(params=params) - options.params = dict(url.params) - options.url = str(url) - return options - - raise ValueError("Unexpected PageInfo state") - - -class BaseSyncPage(BasePage[_T], Generic[_T]): - _client: HttpClient = pydantic.PrivateAttr() - - def _set_private_attributes( - self, - client: HttpClient, - model: type[_T], - options: FinalRequestOptions, - ) -> None: - self._model = model - self._client = client - self._options = options - - # Pydantic uses a custom `__iter__` method to support casting BaseModels - # to dictionaries. e.g. dict(model). - # As we want to support `for item in page`, this is inherently incompatible - # with the default pydantic behavior. It is not possible to support both - # use cases at once. Fortunately, this is not a big deal as all other pydantic - # methods should continue to work as expected as there is an alternative method - # to cast a model to a dictionary, model.dict(), which is used internally - # by pydantic. - def __iter__(self) -> Iterator[_T]: # type: ignore - for page in self.iter_pages(): - yield from page._get_page_items() - - def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: - page = self - while True: - yield page - if page.has_next_page(): - page = page.get_next_page() - else: - return - - def get_next_page(self: SyncPageT) -> SyncPageT: - info = self.next_page_info() - if not info: - raise RuntimeError( - "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." 
- ) - - options = self._info_to_options(info) - return self._client._request_api_list(self._model, page=self.__class__, options=options) - - -class HttpClient: - _client: httpx.Client - _version: str - _base_url: URL - max_retries: int - timeout: Union[float, Timeout, None] - _limits: httpx.Limits - _has_custom_http_client: bool - _default_stream_cls: type[StreamResponse[Any]] | None = None - - _strict_response_validation: bool - - def __init__( - self, - *, - version: str, - base_url: URL, - _strict_response_validation: bool, - max_retries: int = ZHIPUAI_DEFAULT_MAX_RETRIES, - timeout: Union[float, Timeout, None], - limits: httpx.Limits | None = None, - custom_httpx_client: httpx.Client | None = None, - custom_headers: Mapping[str, str] | None = None, - ) -> None: - if limits is not None: - warnings.warn( - "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", # noqa: E501 - category=DeprecationWarning, - stacklevel=3, - ) - if custom_httpx_client is not None: - raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") - else: - limits = ZHIPUAI_DEFAULT_LIMITS - - if not is_given(timeout): - if custom_httpx_client and custom_httpx_client.timeout != HTTPX_DEFAULT_TIMEOUT: - timeout = custom_httpx_client.timeout - else: - timeout = ZHIPUAI_DEFAULT_TIMEOUT - self.max_retries = max_retries - self.timeout = timeout - self._limits = limits - self._has_custom_http_client = bool(custom_httpx_client) - self._client = custom_httpx_client or httpx.Client( - base_url=base_url, - timeout=self.timeout, - limits=limits, - ) - self._version = version - url = URL(url=base_url) - if not url.raw_path.endswith(b"/"): - url = url.copy_with(raw_path=url.raw_path + b"/") - self._base_url = url - self._custom_headers = custom_headers or {} - self._strict_response_validation = _strict_response_validation - - def _prepare_url(self, url: str) -> URL: - sub_url = URL(url) - if sub_url.is_relative_url: - request_raw_url = self._base_url.raw_path + sub_url.raw_path.lstrip(b"/") - return self._base_url.copy_with(raw_path=request_raw_url) - - return sub_url - - @property - def _default_headers(self): - return { - "Accept": "application/json", - "Content-Type": "application/json; charset=UTF-8", - "ZhipuAI-SDK-Ver": self._version, - "source_type": "zhipu-sdk-python", - "x-request-sdk": "zhipu-sdk-python", - **self.auth_headers, - **self._custom_headers, - } - - @property - def custom_auth(self) -> httpx.Auth | None: - return None - - @property - def auth_headers(self): - return {} - - def _prepare_headers(self, options: FinalRequestOptions) -> httpx.Headers: - custom_headers = options.headers or {} - headers_dict = _merge_mappings(self._default_headers, custom_headers) - - httpx_headers = httpx.Headers(headers_dict) - - return httpx_headers - - def _remaining_retries( - self, - remaining_retries: Optional[int], - options: FinalRequestOptions, - ) -> int: - return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries) - - def _calculate_retry_timeout( - self, - remaining_retries: int, - options: FinalRequestOptions, - response_headers: Optional[httpx.Headers] = None, - ) -> float: - max_retries = options.get_max_retries(self.max_retries) - - # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. 
- # retry_after = self._parse_retry_after_header(response_headers) - # if retry_after is not None and 0 < retry_after <= 60: - # return retry_after - - nb_retries = max_retries - remaining_retries - - # Apply exponential backoff, but not more than the max. - sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) - - # Apply some jitter, plus-or-minus half a second. - jitter = 1 - 0.25 * random() - timeout = sleep_seconds * jitter - return max(timeout, 0) - - def _build_request(self, options: FinalRequestOptions) -> httpx.Request: - kwargs: dict[str, Any] = {} - headers = self._prepare_headers(options) - url = self._prepare_url(options.url) - json_data = options.json_data - if options.extra_json is not None: - if json_data is None: - json_data = cast(Body, options.extra_json) - elif is_mapping(json_data): - json_data = _merge_mappings(json_data, options.extra_json) - else: - raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") - - content_type = headers.get("Content-Type") - # multipart/form-data; boundary=---abc-- - if headers.get("Content-Type") == "multipart/form-data": - if "boundary" not in content_type: - # only remove the header if the boundary hasn't been explicitly set, - # so httpx generates its own; a caller-provided boundary is kept as-is - headers.pop("Content-Type") - - if json_data: - kwargs["data"] = self._make_multipartform(json_data) - - return self._client.build_request( - headers=headers, - timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, - method=options.method, - url=url, - json=json_data, - files=options.files, - params=options.params, - **kwargs, - ) - - def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]: - items = [] - - if isinstance(value, Mapping): - for k, v in value.items(): - items.extend(self._object_to_formdata(f"{key}[{k}]", v)) - return items - if isinstance(value, list | tuple): - for v in value: - items.extend(self._object_to_formdata(key + "[]", v)) - return items - - def _primitive_value_to_str(val) -> str: - # copied from httpx - if val is True: - return "true" - elif val is False: - return "false" - elif val is None: - return "" - return str(val) - - str_data = _primitive_value_to_str(value) - - if not str_data: - return [] - return [(key, str_data)] - - def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: - items = flatten(list(starmap(self._object_to_formdata, data.items()))) - - serialized: dict[str, object] = {} - for key, value in items: - if key in serialized: - raise ValueError(f"Duplicate form key: {key}") - serialized[key] = value - return serialized - - def _process_response_data( - self, - *, - data: object, - cast_type: type[ResponseT], - response: httpx.Response, - ) -> ResponseT: - if data is None: - return cast(ResponseT, None) - - if cast_type is object: - return cast(ResponseT, data) - - try: - if inspect.isclass(cast_type) and issubclass(cast_type, ModelBuilderProtocol): - return cast(ResponseT, cast_type.build(response=response, data=data)) - - if self._strict_response_validation: - return cast(ResponseT, validate_type(type_=cast_type, value=data)) - - return cast(ResponseT, construct_type(type_=cast_type, value=data)) - except pydantic.ValidationError as err: - raise APIResponseValidationError(response=response, json_data=data) from err - - def _should_stream_response_body(self, request: httpx.Request) -> bool: - return
request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return] - - def _should_retry(self, response: httpx.Response) -> bool: - # Note: this is not a standard header - should_retry_header = response.headers.get("x-should-retry") - - # If the server explicitly says whether or not to retry, obey. - if should_retry_header == "true": - log.debug("Retrying as header `x-should-retry` is set to `true`") - return True - if should_retry_header == "false": - log.debug("Not retrying as header `x-should-retry` is set to `false`") - return False - - # Retry on request timeouts. - if response.status_code == 408: - log.debug("Retrying due to status code %i", response.status_code) - return True - - # Retry on lock timeouts. - if response.status_code == 409: - log.debug("Retrying due to status code %i", response.status_code) - return True - - # Retry on rate limits. - if response.status_code == 429: - log.debug("Retrying due to status code %i", response.status_code) - return True - - # Retry internal errors. - if response.status_code >= 500: - log.debug("Retrying due to status code %i", response.status_code) - return True - - log.debug("Not retrying") - return False - - def is_closed(self) -> bool: - return self._client.is_closed - - def close(self): - self._client.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def request( - self, - cast_type: type[ResponseT], - options: FinalRequestOptions, - remaining_retries: Optional[int] = None, - *, - stream: bool = False, - stream_cls: type[StreamResponse] | None = None, - ) -> ResponseT | StreamResponse: - return self._request( - cast_type=cast_type, - options=options, - stream=stream, - stream_cls=stream_cls, - remaining_retries=remaining_retries, - ) - - def _request( - self, - *, - cast_type: type[ResponseT], - options: FinalRequestOptions, - remaining_retries: int | None, - stream: bool, - stream_cls: type[StreamResponse] | None, - ) -> ResponseT | StreamResponse: - retries = self._remaining_retries(remaining_retries, options) - request = self._build_request(options) - - kwargs: HttpxSendArgs = {} - if self.custom_auth is not None: - kwargs["auth"] = self.custom_auth - try: - response = self._client.send( - request, - stream=stream or self._should_stream_response_body(request=request), - **kwargs, - ) - except httpx.TimeoutException as err: - log.debug("Encountered httpx.TimeoutException", exc_info=True) - - if retries > 0: - return self._retry_request( - options, - cast_type, - retries, - stream=stream, - stream_cls=stream_cls, - response_headers=None, - ) - - log.debug("Raising timeout error") - raise APITimeoutError(request=request) from err - except Exception as err: - log.debug("Encountered Exception", exc_info=True) - - if retries > 0: - return self._retry_request( - options, - cast_type, - retries, - stream=stream, - stream_cls=stream_cls, - response_headers=None, - ) - - log.debug("Raising connection error") - raise APIConnectionError(request=request) from err - - log.debug( - 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase - ) - - try: - response.raise_for_status() - except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code - log.debug("Encountered httpx.HTTPStatusError", exc_info=True) - - if retries > 0 and self._should_retry(err.response): - err.response.close() - return self._retry_request( - options, - cast_type, - retries, - err.response.headers, - stream=stream, - 
stream_cls=stream_cls, - ) - - # If the response is streamed then we need to explicitly read the response - # to completion before attempting to access the response text. - if not err.response.is_closed: - err.response.read() - - log.debug("Re-raising status error") - raise self._make_status_error(err.response) from None - - # return self._parse_response( - # cast_type=cast_type, - # options=options, - # response=response, - # stream=stream, - # stream_cls=stream_cls, - # ) - return self._process_response( - cast_type=cast_type, - options=options, - response=response, - stream=stream, - stream_cls=stream_cls, - ) - - def _retry_request( - self, - options: FinalRequestOptions, - cast_type: type[ResponseT], - remaining_retries: int, - response_headers: httpx.Headers | None, - *, - stream: bool, - stream_cls: type[StreamResponse] | None, - ) -> ResponseT | StreamResponse: - remaining = remaining_retries - 1 - if remaining == 1: - log.debug("1 retry left") - else: - log.debug("%i retries left", remaining) - - timeout = self._calculate_retry_timeout(remaining, options, response_headers) - log.info("Retrying request to %s in %f seconds", options.url, timeout) - - # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a - # different thread if necessary. - time.sleep(timeout) - - return self._request( - options=options, - cast_type=cast_type, - remaining_retries=remaining, - stream=stream, - stream_cls=stream_cls, - ) - - def _process_response( - self, - *, - cast_type: type[ResponseT], - options: FinalRequestOptions, - response: httpx.Response, - stream: bool, - stream_cls: type[StreamResponse] | None, - ) -> ResponseT: - # _legacy_response with raw_response_header to parser method - if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": - return cast( - ResponseT, - LegacyAPIResponse( - raw=response, - client=self, - cast_type=cast_type, - stream=stream, - stream_cls=stream_cls, - options=options, - ), - ) - - origin = get_origin(cast_type) or cast_type - - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): - if not issubclass(origin, APIResponse): - raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") - - response_cls = cast("type[BaseAPIResponse[Any]]", cast_type) - return cast( - ResponseT, - response_cls( - raw=response, - client=self, - cast_type=extract_response_type(response_cls), - stream=stream, - stream_cls=stream_cls, - options=options, - ), - ) - - if cast_type == httpx.Response: - return cast(ResponseT, response) - - api_response = APIResponse( - raw=response, - client=self, - cast_type=cast("type[ResponseT]", cast_type), # pyright: ignore[reportUnnecessaryCast] - stream=stream, - stream_cls=stream_cls, - options=options, - ) - if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): - return cast(ResponseT, api_response) - - return api_response.parse() - - def _request_api_list( - self, - model: type[object], - page: type[SyncPageT], - options: FinalRequestOptions, - ) -> SyncPageT: - def _parser(resp: SyncPageT) -> SyncPageT: - resp._set_private_attributes( - client=self, - model=model, - options=options, - ) - return resp - - options.post_parser = _parser - - return self.request(page, options, stream=False) - - @overload - def get( - self, - path: str, - *, - cast_type: type[ResponseT], - options: UserRequestInput = {}, - stream: Literal[False] = False, - ) -> ResponseT: ... 
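- # Illustrative call shapes for this overload group (a sketch; `Foo` and `ChunkModel` are
- # hypothetical cast/chunk types, not part of this SDK):
- #   self.get("/path", cast_type=Foo)  # -> Foo
- #   self.get("/path", cast_type=Foo, stream=True, stream_cls=StreamResponse[ChunkModel])  # -> StreamResponse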
- - @overload - def get( - self, - path: str, - *, - cast_type: type[ResponseT], - options: UserRequestInput = {}, - stream: Literal[True], - stream_cls: type[StreamResponse], - ) -> StreamResponse: ... - - @overload - def get( - self, - path: str, - *, - cast_type: type[ResponseT], - options: UserRequestInput = {}, - stream: bool, - stream_cls: type[StreamResponse] | None = None, - ) -> ResponseT | StreamResponse: ... - - def get( - self, - path: str, - *, - cast_type: type[ResponseT], - options: UserRequestInput = {}, - stream: bool = False, - stream_cls: type[StreamResponse] | None = None, - ) -> ResponseT: - opts = FinalRequestOptions.construct(method="get", url=path, **options) - return cast(ResponseT, self.request(cast_type, opts, stream=stream, stream_cls=stream_cls)) - - @overload - def post( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - files: RequestFiles | None = None, - stream: Literal[False] = False, - ) -> ResponseT: ... - - @overload - def post( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - files: RequestFiles | None = None, - stream: Literal[True], - stream_cls: type[StreamResponse], - ) -> StreamResponse: ... - - @overload - def post( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - files: RequestFiles | None = None, - stream: bool, - stream_cls: type[StreamResponse] | None = None, - ) -> ResponseT | StreamResponse: ... - - def post( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - files: RequestFiles | None = None, - stream: bool = False, - stream_cls: type[StreamResponse[Any]] | None = None, - ) -> ResponseT | StreamResponse: - opts = FinalRequestOptions.construct( - method="post", url=path, json_data=body, files=to_httpx_files(files), **options - ) - - return cast(ResponseT, self.request(cast_type, opts, stream=stream, stream_cls=stream_cls)) - - def patch( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) - - return self.request( - cast_type=cast_type, - options=opts, - ) - - def put( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - files: RequestFiles | None = None, - ) -> ResponseT | StreamResponse: - opts = FinalRequestOptions.construct( - method="put", url=path, json_data=body, files=to_httpx_files(files), **options - ) - - return self.request( - cast_type=cast_type, - options=opts, - ) - - def delete( - self, - path: str, - *, - cast_type: type[ResponseT], - body: Body | None = None, - options: UserRequestInput = {}, - ) -> ResponseT | StreamResponse: - opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) - - return self.request( - cast_type=cast_type, - options=opts, - ) - - def get_api_list( - self, - path: str, - *, - model: type[object], - page: type[SyncPageT], - body: Body | None = None, - options: UserRequestInput = {}, - method: str = "get", - ) -> SyncPageT: - opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) - return self._request_api_list(model, page, opts) - - def _make_status_error(self, response) -> APIStatusError: - response_text = 
response.text.strip() - status_code = response.status_code - error_msg = f"Error code: {status_code}, with error text {response_text}" - - if status_code == 400: - return _errors.APIRequestFailedError(message=error_msg, response=response) - elif status_code == 401: - return _errors.APIAuthenticationError(message=error_msg, response=response) - elif status_code == 429: - return _errors.APIReachLimitError(message=error_msg, response=response) - elif status_code == 500: - return _errors.APIInternalError(message=error_msg, response=response) - elif status_code == 503: - return _errors.APIServerFlowExceedError(message=error_msg, response=response) - return APIStatusError(message=error_msg, response=response) - - -def make_request_options( - *, - query: Query | None = None, - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - post_parser: PostParser | NotGiven = NOT_GIVEN, -) -> UserRequestInput: - """Create a dict of type RequestOptions without keys of NotGiven values.""" - options: UserRequestInput = {} - if extra_headers is not None: - options["headers"] = extra_headers - - if extra_body is not None: - options["extra_json"] = cast(AnyMapping, extra_body) - - if query is not None: - options["params"] = query - - if extra_query is not None: - options["params"] = {**options.get("params", {}), **extra_query} - - if not isinstance(timeout, NotGiven): - options["timeout"] = timeout - - if is_given(post_parser): - # internal - options["post_parser"] = post_parser # type: ignore - - return options - - -def _merge_mappings( - obj1: Mapping[_T_co, Union[_T, Omit]], - obj2: Mapping[_T_co, Union[_T, Omit]], -) -> dict[_T_co, _T]: - """Merge two mappings of the same type, removing any values that are instances of `Omit`. - - In cases with duplicate keys the second mapping takes precedence. 
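-
- Illustrative example (a sketch of the semantics implemented below):
-
- _merge_mappings({"a": 1, "b": 2}, {"b": Omit()})  # -> {"a": 1}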
- """ - merged = {**obj1, **obj2} - return {key: value for key, value in merged.items() if not isinstance(value, Omit)} diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py deleted file mode 100644 index 21f158a5f4..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_jwt_token.py +++ /dev/null @@ -1,31 +0,0 @@ -import time - -import cachetools.func -import jwt - -# 缓存时间 3分钟 -CACHE_TTL_SECONDS = 3 * 60 - -# token 有效期比缓存时间 多30秒 -API_TOKEN_TTL_SECONDS = CACHE_TTL_SECONDS + 30 - - -@cachetools.func.ttl_cache(maxsize=10, ttl=CACHE_TTL_SECONDS) -def generate_token(apikey: str): - try: - api_key, secret = apikey.split(".") - except Exception as e: - raise Exception("invalid api_key", e) - - payload = { - "api_key": api_key, - "exp": int(round(time.time() * 1000)) + API_TOKEN_TTL_SECONDS * 1000, - "timestamp": int(round(time.time() * 1000)), - } - ret = jwt.encode( - payload, - secret, - algorithm="HS256", - headers={"alg": "HS256", "sign_type": "SIGN"}, - ) - return ret diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py deleted file mode 100644 index 51623bd860..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_binary_response.py +++ /dev/null @@ -1,207 +0,0 @@ -from __future__ import annotations - -import os -from collections.abc import AsyncIterator, Iterator -from typing import Any - -import httpx - - -class HttpxResponseContent: - @property - def content(self) -> bytes: - raise NotImplementedError("This method is not implemented for this class.") - - @property - def text(self) -> str: - raise NotImplementedError("This method is not implemented for this class.") - - @property - def encoding(self) -> str | None: - raise NotImplementedError("This method is not implemented for this class.") - - @property - def charset_encoding(self) -> str | None: - raise NotImplementedError("This method is not implemented for this class.") - - def json(self, **kwargs: Any) -> Any: - raise NotImplementedError("This method is not implemented for this class.") - - def read(self) -> bytes: - raise NotImplementedError("This method is not implemented for this class.") - - def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: - raise NotImplementedError("This method is not implemented for this class.") - - def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: - raise NotImplementedError("This method is not implemented for this class.") - - def iter_lines(self) -> Iterator[str]: - raise NotImplementedError("This method is not implemented for this class.") - - def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]: - raise NotImplementedError("This method is not implemented for this class.") - - def write_to_file( - self, - file: str | os.PathLike[str], - ) -> None: - raise NotImplementedError("This method is not implemented for this class.") - - def stream_to_file( - self, - file: str | os.PathLike[str], - *, - chunk_size: int | None = None, - ) -> None: - raise NotImplementedError("This method is not implemented for this class.") - - def close(self) -> None: - raise NotImplementedError("This method is not implemented for this class.") - - async def aread(self) -> bytes: - raise NotImplementedError("This method is not implemented for this class.") 
- - async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: - raise NotImplementedError("This method is not implemented for this class.") - - async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: - raise NotImplementedError("This method is not implemented for this class.") - - async def aiter_lines(self) -> AsyncIterator[str]: - raise NotImplementedError("This method is not implemented for this class.") - - async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: - raise NotImplementedError("This method is not implemented for this class.") - - async def astream_to_file( - self, - file: str | os.PathLike[str], - *, - chunk_size: int | None = None, - ) -> None: - raise NotImplementedError("This method is not implemented for this class.") - - async def aclose(self) -> None: - raise NotImplementedError("This method is not implemented for this class.") - - -class HttpxBinaryResponseContent(HttpxResponseContent): - response: httpx.Response - - def __init__(self, response: httpx.Response) -> None: - self.response = response - - @property - def content(self) -> bytes: - return self.response.content - - @property - def encoding(self) -> str | None: - return self.response.encoding - - @property - def charset_encoding(self) -> str | None: - return self.response.charset_encoding - - def read(self) -> bytes: - return self.response.read() - - @property - def text(self) -> str: - raise NotImplementedError("Not implemented for binary response content") - - def json(self, **kwargs: Any) -> Any: - raise NotImplementedError("Not implemented for binary response content") - - def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: - raise NotImplementedError("Not implemented for binary response content") - - def iter_lines(self) -> Iterator[str]: - raise NotImplementedError("Not implemented for binary response content") - - async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: - raise NotImplementedError("Not implemented for binary response content") - - async def aiter_lines(self) -> AsyncIterator[str]: - raise NotImplementedError("Not implemented for binary response content") - - def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: - return self.response.iter_bytes(chunk_size) - - def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]: - return self.response.iter_raw(chunk_size) - - def write_to_file( - self, - file: str | os.PathLike[str], - ) -> None: - """Write the output to the given file. - - Accepts a filename or any path-like object, e.g. pathlib.Path - - Note: if you want to stream the data to the file instead of writing - all at once then you should use `.with_streaming_response` when making - the API request, e.g.
`client.with_streaming_response.foo().stream_to_file('my_filename.txt')` - """ - with open(file, mode="wb") as f: - for data in self.response.iter_bytes(): - f.write(data) - - def stream_to_file( - self, - file: str | os.PathLike[str], - *, - chunk_size: int | None = None, - ) -> None: - with open(file, mode="wb") as f: - for data in self.response.iter_bytes(chunk_size): - f.write(data) - - def close(self) -> None: - return self.response.close() - - async def aread(self) -> bytes: - return await self.response.aread() - - async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: - return self.response.aiter_bytes(chunk_size) - - async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: - return self.response.aiter_raw(chunk_size) - - async def astream_to_file( - self, - file: str | os.PathLike[str], - *, - chunk_size: int | None = None, - ) -> None: - path = anyio.Path(file) - async with await path.open(mode="wb") as f: - async for data in self.response.aiter_bytes(chunk_size): - await f.write(data) - - async def aclose(self) -> None: - return await self.response.aclose() - - -class HttpxTextBinaryResponseContent(HttpxBinaryResponseContent): - response: httpx.Response - - @property - def text(self) -> str: - return self.response.text - - def json(self, **kwargs: Any) -> Any: - return self.response.json(**kwargs) - - def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: - return self.response.iter_text(chunk_size) - - def iter_lines(self) -> Iterator[str]: - return self.response.iter_lines() - - async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: - return self.response.aiter_text(chunk_size) - - async def aiter_lines(self) -> AsyncIterator[str]: - return self.response.aiter_lines() diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py deleted file mode 100644 index 51bf21bcdc..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_legacy_response.py +++ /dev/null @@ -1,341 +0,0 @@ -from __future__ import annotations - -import datetime -import functools -import inspect -import logging -from collections.abc import Callable -from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, get_origin, overload - -import httpx -import pydantic -from typing_extensions import ParamSpec, override - -from ._base_models import BaseModel, is_basemodel -from ._base_type import NoneType -from ._constants import RAW_RESPONSE_HEADER -from ._errors import APIResponseValidationError -from ._legacy_binary_response import HttpxResponseContent, HttpxTextBinaryResponseContent -from ._sse_client import StreamResponse, extract_stream_chunk_type, is_stream_class_type -from ._utils import extract_type_arg, is_annotated_type, is_given - -if TYPE_CHECKING: - from ._http_client import HttpClient - from ._request_opt import FinalRequestOptions - -P = ParamSpec("P") -R = TypeVar("R") -_T = TypeVar("_T") - -log: logging.Logger = logging.getLogger(__name__) - - -class LegacyAPIResponse(Generic[R]): - """This is a legacy class as it will be replaced by `APIResponse` - and `AsyncAPIResponse` in the `_response.py` file in the next major - release. - - For the sync client this will mostly be the same with the exception - of `content` & `text` will be methods instead of properties. In the - async client, all methods will be async. 
- - A migration script will be provided & the migration in general should - be smooth. - """ - - _cast_type: type[R] - _client: HttpClient - _parsed_by_type: dict[type[Any], Any] - _stream: bool - _stream_cls: type[StreamResponse[Any]] | None - _options: FinalRequestOptions - - http_response: httpx.Response - - def __init__( - self, - *, - raw: httpx.Response, - cast_type: type[R], - client: HttpClient, - stream: bool, - stream_cls: type[StreamResponse[Any]] | None, - options: FinalRequestOptions, - ) -> None: - self._cast_type = cast_type - self._client = client - self._parsed_by_type = {} - self._stream = stream - self._stream_cls = stream_cls - self._options = options - self.http_response = raw - - @property - def request_id(self) -> str | None: - return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] - - @overload - def parse(self, *, to: type[_T]) -> _T: ... - - @overload - def parse(self) -> R: ... - - def parse(self, *, to: type[_T] | None = None) -> R | _T: - """Returns the rich python representation of this response's data. - - NOTE: For the async client: this will become a coroutine in the next major version. - - For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. - - You can customize the type that the response is parsed into through - the `to` argument, e.g. - - ```py - from zhipuai import BaseModel - - - class MyModel(BaseModel): - foo: str - - - obj = response.parse(to=MyModel) - print(obj.foo) - ``` - - We support parsing: - - `BaseModel` - - `dict` - - `list` - - `Union` - - `str` - - `int` - - `float` - - `httpx.Response` - """ - cache_key = to if to is not None else self._cast_type - cached = self._parsed_by_type.get(cache_key) - if cached is not None: - return cached # type: ignore[no-any-return] - - parsed = self._parse(to=to) - if is_given(self._options.post_parser): - parsed = self._options.post_parser(parsed) - - self._parsed_by_type[cache_key] = parsed - return parsed - - @property - def headers(self) -> httpx.Headers: - return self.http_response.headers - - @property - def http_request(self) -> httpx.Request: - return self.http_response.request - - @property - def status_code(self) -> int: - return self.http_response.status_code - - @property - def url(self) -> httpx.URL: - return self.http_response.url - - @property - def method(self) -> str: - return self.http_request.method - - @property - def content(self) -> bytes: - """Return the binary response content. - - NOTE: this will be removed in favour of `.read()` in the - next major version. - """ - return self.http_response.content - - @property - def text(self) -> str: - """Return the decoded response content. - - NOTE: this will be turned into a method in the next major version. 
- """ - return self.http_response.text - - @property - def http_version(self) -> str: - return self.http_response.http_version - - @property - def is_closed(self) -> bool: - return self.http_response.is_closed - - @property - def elapsed(self) -> datetime.timedelta: - """The time taken for the complete request/response cycle to complete.""" - return self.http_response.elapsed - - def _parse(self, *, to: type[_T] | None = None) -> R | _T: - # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) - - if self._stream: - if to: - if not is_stream_class_type(to): - raise TypeError(f"Expected custom parse type to be a subclass of {StreamResponse}") - - return cast( - _T, - to( - cast_type=extract_stream_chunk_type( - to, - failure_message="Expected custom stream type to be passed with a type argument, e.g. StreamResponse[ChunkType]", # noqa: E501 - ), - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - if self._stream_cls: - return cast( - R, - self._stream_cls( - cast_type=extract_stream_chunk_type(self._stream_cls), - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - stream_cls = cast("type[StreamResponse[Any]] | None", self._client._default_stream_cls) - if stream_cls is None: - raise MissingStreamClassError() - - return cast( - R, - stream_cls( - cast_type=self._cast_type, - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - cast_type = to if to is not None else self._cast_type - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_type): - cast_type = extract_type_arg(cast_type, 0) - - if cast_type is NoneType: - return cast(R, None) - - response = self.http_response - if cast_type == str: - return cast(R, response.text) - - if cast_type == int: - return cast(R, int(response.text)) - - if cast_type == float: - return cast(R, float(response.text)) - - origin = get_origin(cast_type) or cast_type - - if inspect.isclass(origin) and issubclass(origin, HttpxResponseContent): - # in the response, e.g. mime file - *_, filename = response.headers.get("content-disposition", "").split("filename=") - # 判断文件类型是jsonl类型的使用HttpxTextBinaryResponseContent - if filename and filename.endswith(".jsonl") or filename and filename.endswith(".xlsx"): - return cast(R, HttpxTextBinaryResponseContent(response)) - else: - return cast(R, cast_type(response)) # type: ignore - - if origin == LegacyAPIResponse: - raise RuntimeError("Unexpected state - cast_type is `APIResponse`") - - if inspect.isclass(origin) and issubclass(origin, httpx.Response): - # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response - # and pass that class to our request functions. We cannot change the variance to be either - # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct - # the response class ourselves but that is something that should be supported directly in httpx - # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. - if cast_type != httpx.Response: - raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_type`") - return cast(R, response) - - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): - raise TypeError("Pydantic models must subclass our base model type, e.g. 
`from openai import BaseModel`") - - if ( - cast_type is not object - and origin is not list - and origin is not dict - and origin is not Union - and not issubclass(origin, BaseModel) - ): - raise RuntimeError( - f"Unsupported type, expected {cast_type} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." # noqa: E501 - ) - - # split is required to handle cases where additional information is included - # in the response, e.g. application/json; charset=utf-8 - content_type, *_ = response.headers.get("content-type", "*").split(";") - if content_type != "application/json": - if is_basemodel(cast_type): - try: - data = response.json() - except Exception as exc: - log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) - else: - return self._client._process_response_data( - data=data, - cast_type=cast_type, # type: ignore - response=response, - ) - - if self._client._strict_response_validation: - raise APIResponseValidationError( - response=response, - message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", # noqa: E501 - json_data=response.text, - ) - - # If the API responds with content that isn't JSON then we just return - # the (decoded) text without performing any parsing so that you can still - # handle the response however you need to. - return response.text # type: ignore - - data = response.json() - - return self._client._process_response_data( - data=data, - cast_type=cast_type, # type: ignore - response=response, - ) - - @override - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_type}>" - - -class MissingStreamClassError(TypeError): - def __init__(self) -> None: - super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", # noqa: E501 - ) - - -def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]: - """Higher order function that takes one of our bound API methods and wraps it - to support returning the raw `APIResponse` object directly.
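-
- Illustrative usage (a sketch; `client.chat.completions.create` is a hypothetical bound API method):
-
- raw_create = to_raw_response_wrapper(client.chat.completions.create)
- response = raw_create(model="glm-4", messages=[])  # returns a LegacyAPIResponse
- completion = response.parse()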
- """ - - @functools.wraps(func) - def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]: - extra_headers: dict[str, str] = {**(cast(Any, kwargs.get("extra_headers")) or {})} - extra_headers[RAW_RESPONSE_HEADER] = "true" - - kwargs["extra_headers"] = extra_headers - - return cast(LegacyAPIResponse[R], func(*args, **kwargs)) - - return wrapped diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py deleted file mode 100644 index c3b894b3a3..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_request_opt.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -from collections.abc import Callable -from typing import TYPE_CHECKING, Any, ClassVar, Union, cast - -import pydantic.generics -from httpx import Timeout -from typing_extensions import Required, TypedDict, Unpack, final - -from ._base_compat import PYDANTIC_V2, ConfigDict -from ._base_type import AnyMapping, Body, Headers, HttpxRequestFiles, NotGiven, Query -from ._constants import RAW_RESPONSE_HEADER -from ._utils import is_given, strip_not_given - - -class UserRequestInput(TypedDict, total=False): - headers: Headers - max_retries: int - timeout: float | Timeout | None - params: Query - extra_json: AnyMapping - - -class FinalRequestOptionsInput(TypedDict, total=False): - method: Required[str] - url: Required[str] - params: Query - headers: Headers - max_retries: int - timeout: float | Timeout | None - files: HttpxRequestFiles | None - json_data: Body - extra_json: AnyMapping - - -@final -class FinalRequestOptions(pydantic.BaseModel): - method: str - url: str - params: Query = {} - headers: Union[Headers, NotGiven] = NotGiven() - max_retries: Union[int, NotGiven] = NotGiven() - timeout: Union[float, Timeout, None, NotGiven] = NotGiven() - files: Union[HttpxRequestFiles, None] = None - idempotency_key: Union[str, None] = None - post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() - - # It should be noted that we cannot use `json` here as that would override - # a BaseModel method in an incompatible fashion. - json_data: Union[Body, None] = None - extra_json: Union[AnyMapping, None] = None - - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) - else: - - class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] - arbitrary_types_allowed: bool = True - - def get_max_retries(self, max_retries: int) -> int: - if isinstance(self.max_retries, NotGiven): - return max_retries - return self.max_retries - - def _strip_raw_response_header(self) -> None: - if not is_given(self.headers): - return - - if self.headers.get(RAW_RESPONSE_HEADER): - self.headers = {**self.headers} - self.headers.pop(RAW_RESPONSE_HEADER) - - # override the `construct` method so that we can run custom transformations. 
- # this is necessary as we don't want to do any actual runtime type checking - # (which means we can't use validators) but we do want to ensure that `NotGiven` - # values are not present - # - # type ignore required because we're adding explicit types to `**values` - @classmethod - def construct( # type: ignore - cls, - _fields_set: set[str] | None = None, - **values: Unpack[UserRequestInput], - ) -> FinalRequestOptions: - kwargs: dict[str, Any] = { - # we unconditionally call `strip_not_given` on any value - # as it will just ignore any non-mapping types - key: strip_not_given(value) - for key, value in values.items() - } - if PYDANTIC_V2: - return super().model_construct(_fields_set, **kwargs) - return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] - - if not TYPE_CHECKING: - # type checkers incorrectly complain about this assignment - model_construct = construct diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py deleted file mode 100644 index 92e6018055..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_response.py +++ /dev/null @@ -1,398 +0,0 @@ -from __future__ import annotations - -import datetime -import inspect -import logging -from collections.abc import Iterator -from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union, cast, get_origin, overload - -import httpx -import pydantic -from typing_extensions import ParamSpec, override - -from ._base_models import BaseModel, is_basemodel -from ._base_type import NoneType -from ._errors import APIResponseValidationError, ZhipuAIError -from ._sse_client import StreamResponse, extract_stream_chunk_type, is_stream_class_type -from ._utils import extract_type_arg, extract_type_var_from_base, is_annotated_type, is_given - -if TYPE_CHECKING: - from ._http_client import HttpClient - from ._request_opt import FinalRequestOptions - -P = ParamSpec("P") -R = TypeVar("R") -_T = TypeVar("_T") -_APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]") -log: logging.Logger = logging.getLogger(__name__) - - -class BaseAPIResponse(Generic[R]): - _cast_type: type[R] - _client: HttpClient - _parsed_by_type: dict[type[Any], Any] - _is_sse_stream: bool - _stream_cls: type[StreamResponse[Any]] - _options: FinalRequestOptions - http_response: httpx.Response - - def __init__( - self, - *, - raw: httpx.Response, - cast_type: type[R], - client: HttpClient, - stream: bool, - stream_cls: type[StreamResponse[Any]] | None = None, - options: FinalRequestOptions, - ) -> None: - self._cast_type = cast_type - self._client = client - self._parsed_by_type = {} - self._is_sse_stream = stream - self._stream_cls = stream_cls - self._options = options - self.http_response = raw - - def _parse(self, *, to: type[_T] | None = None) -> R | _T: - # unwrap `Annotated[T, ...]` -> `T` - if to and is_annotated_type(to): - to = extract_type_arg(to, 0) - - if self._is_sse_stream: - if to: - if not is_stream_class_type(to): - raise TypeError(f"Expected custom parse type to be a subclass of {StreamResponse}") - - return cast( - _T, - to( - cast_type=extract_stream_chunk_type( - to, - failure_message="Expected custom stream type to be passed with a type argument, e.g. 
StreamResponse[ChunkType]", # noqa: E501 - ), - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - if self._stream_cls: - return cast( - R, - self._stream_cls( - cast_type=extract_stream_chunk_type(self._stream_cls), - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - stream_cls = cast("type[Stream[Any]] | None", self._client._default_stream_cls) - if stream_cls is None: - raise MissingStreamClassError() - - return cast( - R, - stream_cls( - cast_type=self._cast_type, - response=self.http_response, - client=cast(Any, self._client), - ), - ) - - cast_type = to if to is not None else self._cast_type - - # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(cast_type): - cast_type = extract_type_arg(cast_type, 0) - - if cast_type is NoneType: - return cast(R, None) - - response = self.http_response - if cast_type == str: - return cast(R, response.text) - - if cast_type == bytes: - return cast(R, response.content) - - if cast_type == int: - return cast(R, int(response.text)) - - if cast_type == float: - return cast(R, float(response.text)) - - origin = get_origin(cast_type) or cast_type - - # handle the legacy binary response case - if inspect.isclass(cast_type) and cast_type.__name__ == "HttpxBinaryResponseContent": - return cast(R, cast_type(response)) # type: ignore - - if origin == APIResponse: - raise RuntimeError("Unexpected state - cast_type is `APIResponse`") - - if inspect.isclass(origin) and issubclass(origin, httpx.Response): - # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response - # and pass that class to our request functions. We cannot change the variance to be either - # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct - # the response class ourselves but that is something that should be supported directly in httpx - # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. - if cast_type != httpx.Response: - raise ValueError("Subclasses of httpx.Response cannot be passed to `cast_type`") - return cast(R, response) - - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): - raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") - - if ( - cast_type is not object - and origin is not list - and origin is not dict - and origin is not Union - and not issubclass(origin, BaseModel) - ): - raise RuntimeError( - f"Unsupported type, expected {cast_type} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." # noqa: E501 - ) - - # split is required to handle cases where additional information is included - # in the response, e.g. 
application/json; charset=utf-8 - content_type, *_ = response.headers.get("content-type", "*").split(";") - if content_type != "application/json": - if is_basemodel(cast_type): - try: - data = response.json() - except Exception as exc: - log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) - else: - return self._client._process_response_data( - data=data, - cast_type=cast_type, # type: ignore - response=response, - ) - - if self._client._strict_response_validation: - raise APIResponseValidationError( - response=response, - message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", # noqa: E501 - json_data=response.text, - ) - - # If the API responds with content that isn't JSON then we just return - # the (decoded) text without performing any parsing so that you can still - # handle the response however you need to. - return response.text # type: ignore - - data = response.json() - - return self._client._process_response_data( - data=data, - cast_type=cast_type, # type: ignore - response=response, - ) - - @property - def headers(self) -> httpx.Headers: - return self.http_response.headers - - @property - def http_request(self) -> httpx.Request: - """Returns the httpx Request instance associated with the current response.""" - return self.http_response.request - - @property - def status_code(self) -> int: - return self.http_response.status_code - - @property - def url(self) -> httpx.URL: - """Returns the URL for which the request was made.""" - return self.http_response.url - - @property - def method(self) -> str: - return self.http_request.method - - @property - def http_version(self) -> str: - return self.http_response.http_version - - @property - def elapsed(self) -> datetime.timedelta: - """The time taken for the complete request/response cycle to complete.""" - return self.http_response.elapsed - - @property - def is_closed(self) -> bool: - """Whether or not the response body has been closed. - - If this is False then there is response data that has not been read yet. - You must either fully consume the response body or call `.close()` - before discarding the response to prevent resource leaks. - """ - return self.http_response.is_closed - - @override - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_type}>" # noqa: E501 - - -class APIResponse(BaseAPIResponse[R]): - @property - def request_id(self) -> str | None: - return self.http_response.headers.get("x-request-id") # type: ignore[no-any-return] - - @overload - def parse(self, *, to: type[_T]) -> _T: ... - - @overload - def parse(self) -> R: ... - - def parse(self, *, to: type[_T] | None = None) -> R | _T: - """Returns the rich python representation of this response's data. - - For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. - - You can customize the type that the response is parsed into through - the `to` argument, e.g. 
- - ```py - from openai import BaseModel - - - class MyModel(BaseModel): - foo: str - - - obj = response.parse(to=MyModel) - print(obj.foo) - ``` - - We support parsing: - - `BaseModel` - - `dict` - - `list` - - `Union` - - `str` - - `int` - - `float` - - `httpx.Response` - """ - cache_key = to if to is not None else self._cast_type - cached = self._parsed_by_type.get(cache_key) - if cached is not None: - return cached # type: ignore[no-any-return] - - if not self._is_sse_stream: - self.read() - - parsed = self._parse(to=to) - if is_given(self._options.post_parser): - parsed = self._options.post_parser(parsed) - - self._parsed_by_type[cache_key] = parsed - return parsed - - def read(self) -> bytes: - """Read and return the binary response content.""" - try: - return self.http_response.read() - except httpx.StreamConsumed as exc: - # The default error raised by httpx isn't very - # helpful in our case so we re-raise it with - # a different error message. - raise StreamAlreadyConsumed() from exc - - def text(self) -> str: - """Read and decode the response content into a string.""" - self.read() - return self.http_response.text - - def json(self) -> object: - """Read and decode the JSON response content.""" - self.read() - return self.http_response.json() - - def close(self) -> None: - """Close the response and release the connection. - - Automatically called if the response body is read to completion. - """ - self.http_response.close() - - def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: - """ - A byte-iterator over the decoded response content. - - This automatically handles gzip, deflate and brotli encoded responses. - """ - yield from self.http_response.iter_bytes(chunk_size) - - def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: - """A str-iterator over the decoded response content - that handles gzip, deflate, etc. and also detects the content's - string encoding. - """ - yield from self.http_response.iter_text(chunk_size) - - def iter_lines(self) -> Iterator[str]: - """Like `iter_text()` but will only yield chunks for each line""" - yield from self.http_response.iter_lines() - - -class MissingStreamClassError(TypeError): - def __init__(self) -> None: - super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", # noqa: E501 - ) - - -class StreamAlreadyConsumed(ZhipuAIError): # noqa: N818 - """ - Attempted to read or stream content, but the content has already - been streamed. - - This can happen if you use a method like `.iter_lines()` and then attempt - to read the entire response body afterwards, e.g. - - ```py - response = await client.post(...) - async for line in response.iter_lines(): - ... # do something with `line` - - content = await response.read() - # ^ error - ``` - - If you want this behavior you'll need to either manually accumulate the response - content or call `await response.read()` before iterating over the stream. - """ - - def __init__(self) -> None: - message = ( - "Attempted to read or stream some content, but the content has " - "already been streamed. " - "This could be due to attempting to stream the response " - "content more than once." - "\n\n" - "You can fix this by manually accumulating the response content while streaming " - "or by calling `.read()` before starting to stream."
- ) - super().__init__(message) - - -def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type: - """Given a type like `APIResponse[T]`, returns the generic type variable `T`. - - This also handles the case where a concrete subclass is given, e.g. - ```py - class MyResponse(APIResponse[bytes]): - ... - - extract_response_type(MyResponse) -> bytes - ``` - """ - return extract_type_var_from_base( - typ, - generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse)), - index=0, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py deleted file mode 100644 index cbc449d244..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_sse_client.py +++ /dev/null @@ -1,206 +0,0 @@ -from __future__ import annotations - -import inspect -import json -from collections.abc import Iterator, Mapping -from typing import TYPE_CHECKING, Generic, TypeGuard, cast - -import httpx - -from . import get_origin -from ._base_type import ResponseT -from ._errors import APIResponseError -from ._utils import extract_type_var_from_base, is_mapping - -_FIELD_SEPARATOR = ":" - -if TYPE_CHECKING: - from ._http_client import HttpClient - - -class StreamResponse(Generic[ResponseT]): - response: httpx.Response - _cast_type: type[ResponseT] - - def __init__( - self, - *, - cast_type: type[ResponseT], - response: httpx.Response, - client: HttpClient, - ) -> None: - self.response = response - self._cast_type = cast_type - self._data_process_func = client._process_response_data - self._stream_chunks = self.__stream__() - - def __next__(self) -> ResponseT: - return self._stream_chunks.__next__() - - def __iter__(self) -> Iterator[ResponseT]: - yield from self._stream_chunks - - def __stream__(self) -> Iterator[ResponseT]: - sse_line_parser = SSELineParser() - iterator = sse_line_parser.iter_lines(self.response.iter_lines()) - - for sse in iterator: - if sse.data.startswith("[DONE]"): - break - - if sse.event is None: - data = sse.json_data() - if isinstance(data, Mapping) and data.get("error"): - raise APIResponseError( - message="An error occurred during streaming", - request=self.response.request, - json_data=data["error"], - ) - if sse.event is None: - data = sse.json_data() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIResponseError( - message=message, - request=self.response.request, - json_data=data["error"], - ) - yield self._data_process_func(data=data, cast_type=self._cast_type, response=self.response) - - else: - data = sse.json_data() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIResponseError( - message=message, - request=self.response.request, - json_data=data["error"], - ) - yield self._data_process_func(data=data, cast_type=self._cast_type, response=self.response) - - for sse in iterator: - pass - - -class Event: - def __init__( - self, event: str | None = None, data: str | None = None, id: str | None = None, retry: int | None = None - ): - self._event = event - self._data = data - self._id = id - self._retry 
= retry - - def __repr__(self): - data_len = len(self._data) if self._data else 0 - return ( - f"Event(event={self._event}, data={self._data}, data_length={data_len}, id={self._id}, retry={self._retry})" - ) - - @property - def event(self): - return self._event - - @property - def data(self): - return self._data - - def json_data(self): - return json.loads(self._data) - - @property - def id(self): - return self._id - - @property - def retry(self): - return self._retry - - -class SSELineParser: - _data: list[str] - _event: str | None - _retry: int | None - _id: str | None - - def __init__(self): - self._event = None - self._data = [] - self._id = None - self._retry = None - - def iter_lines(self, lines: Iterator[str]) -> Iterator[Event]: - for line in lines: - line = line.rstrip("\n") - if not line: - if self._event is None and not self._data and self._id is None and self._retry is None: - continue - sse_event = Event(event=self._event, data="\n".join(self._data), id=self._id, retry=self._retry) - self._event = None - self._data = [] - self._id = None - self._retry = None - - yield sse_event - self.decode_line(line) - - def decode_line(self, line: str): - if line.startswith(":") or not line: - return - - field, _p, value = line.partition(":") - - value = value.removeprefix(" ") - if field == "data": - self._data.append(value) - elif field == "event": - self._event = value - elif field == "id": - # propagate the SSE "id" field into the parsed event - self._id = value - elif field == "retry": - try: - self._retry = int(value) - except (TypeError, ValueError): - pass - return - - -def is_stream_class_type(typ: type) -> TypeGuard[type[StreamResponse[object]]]: - """TypeGuard for determining whether or not the given type is a subclass of `StreamResponse`""" - origin = get_origin(typ) or typ - return inspect.isclass(origin) and issubclass(origin, StreamResponse) - - -def extract_stream_chunk_type( - stream_cls: type, - *, - failure_message: str | None = None, -) -> type: - """Given a type like `StreamResponse[T]`, returns the generic type variable `T`. - - This also handles the case where a concrete subclass is given, e.g. - ```py - class MyStream(StreamResponse[bytes]): - ...
- - extract_stream_chunk_type(MyStream) -> bytes - ``` - """ - - return extract_type_var_from_base( - stream_cls, - index=0, - generic_bases=cast("tuple[type, ...]", (StreamResponse,)), - failure_message=failure_message, - ) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py deleted file mode 100644 index a66b095816..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -from ._utils import ( # noqa: I001 - remove_notgiven_indict as remove_notgiven_indict, # noqa: PLC0414 - flatten as flatten, # noqa: PLC0414 - is_dict as is_dict, # noqa: PLC0414 - is_list as is_list, # noqa: PLC0414 - is_given as is_given, # noqa: PLC0414 - is_tuple as is_tuple, # noqa: PLC0414 - is_mapping as is_mapping, # noqa: PLC0414 - is_tuple_t as is_tuple_t, # noqa: PLC0414 - parse_date as parse_date, # noqa: PLC0414 - is_iterable as is_iterable, # noqa: PLC0414 - is_sequence as is_sequence, # noqa: PLC0414 - coerce_float as coerce_float, # noqa: PLC0414 - is_mapping_t as is_mapping_t, # noqa: PLC0414 - removeprefix as removeprefix, # noqa: PLC0414 - removesuffix as removesuffix, # noqa: PLC0414 - extract_files as extract_files, # noqa: PLC0414 - is_sequence_t as is_sequence_t, # noqa: PLC0414 - required_args as required_args, # noqa: PLC0414 - coerce_boolean as coerce_boolean, # noqa: PLC0414 - coerce_integer as coerce_integer, # noqa: PLC0414 - file_from_path as file_from_path, # noqa: PLC0414 - parse_datetime as parse_datetime, # noqa: PLC0414 - strip_not_given as strip_not_given, # noqa: PLC0414 - deepcopy_minimal as deepcopy_minimal, # noqa: PLC0414 - get_async_library as get_async_library, # noqa: PLC0414 - maybe_coerce_float as maybe_coerce_float, # noqa: PLC0414 - get_required_header as get_required_header, # noqa: PLC0414 - maybe_coerce_boolean as maybe_coerce_boolean, # noqa: PLC0414 - maybe_coerce_integer as maybe_coerce_integer, # noqa: PLC0414 - drop_prefix_image_data as drop_prefix_image_data, # noqa: PLC0414 -) - - -from ._typing import ( - is_list_type as is_list_type, # noqa: PLC0414 - is_union_type as is_union_type, # noqa: PLC0414 - extract_type_arg as extract_type_arg, # noqa: PLC0414 - is_iterable_type as is_iterable_type, # noqa: PLC0414 - is_required_type as is_required_type, # noqa: PLC0414 - is_annotated_type as is_annotated_type, # noqa: PLC0414 - strip_annotated_type as strip_annotated_type, # noqa: PLC0414 - extract_type_var_from_base as extract_type_var_from_base, # noqa: PLC0414 -) - -from ._transform import ( - PropertyInfo as PropertyInfo, # noqa: PLC0414 - transform as transform, # noqa: PLC0414 - async_transform as async_transform, # noqa: PLC0414 - maybe_transform as maybe_transform, # noqa: PLC0414 - async_maybe_transform as async_maybe_transform, # noqa: PLC0414 -) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py deleted file mode 100644 index e8ef1f7935..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_transform.py +++ /dev/null @@ -1,383 +0,0 @@ -from __future__ import annotations - -import base64 -import io -import pathlib -from collections.abc import Mapping -from datetime import date, datetime -from typing import Any, Literal, TypeVar, cast, get_args, get_type_hints - -import anyio -import pydantic 
-from typing_extensions import override - -from .._base_compat import is_typeddict, model_dump -from .._files import is_base64_file_input -from ._typing import ( - extract_type_arg, - is_annotated_type, - is_iterable_type, - is_list_type, - is_required_type, - is_union_type, - strip_annotated_type, -) -from ._utils import ( - is_iterable, - is_list, - is_mapping, -) - -_T = TypeVar("_T") - - -# TODO: support for drilling globals() and locals() -# TODO: ensure works correctly with forward references in all cases - - -PropertyFormat = Literal["iso8601", "base64", "custom"] - - -class PropertyInfo: - """Metadata class to be used in Annotated types to provide information about a given type. - - For example: - - class MyParams(TypedDict): - account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')] - - This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API. - """ # noqa: E501 - - alias: str | None - format: PropertyFormat | None - format_template: str | None - discriminator: str | None - - def __init__( - self, - *, - alias: str | None = None, - format: PropertyFormat | None = None, - format_template: str | None = None, - discriminator: str | None = None, - ) -> None: - self.alias = alias - self.format = format - self.format_template = format_template - self.discriminator = discriminator - - @override - def __repr__(self) -> str: - return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}', discriminator='{self.discriminator}')" # noqa: E501 - - -def maybe_transform( - data: object, - expected_type: object, -) -> Any | None: - """Wrapper over `transform()` that allows `None` to be passed. - - See `transform()` for more details. - """ - if data is None: - return None - return transform(data, expected_type) - - -# Wrapper over _transform_recursive providing fake types -def transform( - data: _T, - expected_type: object, -) -> _T: - """Transform dictionaries based off of type information from the given type, for example: - - ```py - class Params(TypedDict, total=False): - card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] - - - transformed = transform({"card_id": ""}, Params) - # {'cardID': ''} - ``` - - Any keys / data that do not have type information given will be included as is. - - It should be noted that the transformations that this function does are not represented in the type system. - """ - transformed = _transform_recursive(data, annotation=cast(type, expected_type)) - return cast(_T, transformed) - - -def _get_annotated_type(type_: type) -> type | None: - """If the given type is an `Annotated` type then it is returned, if not `None` is returned. - - This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]` - """ - if is_required_type(type_): - # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]` - type_ = get_args(type_)[0] - - if is_annotated_type(type_): - return type_ - - return None - - -def _maybe_transform_key(key: str, type_: type) -> str: - """Transform the given `key` based on the annotations provided in `type_`. - - Note: this function only looks at `Annotated` types that contain `PropertyInfo` metadata.
- """ - annotated_type = _get_annotated_type(type_) - if annotated_type is None: - # no `Annotated` definition for this type, no transformation needed - return key - - # ignore the first argument as it is the actual type - annotations = get_args(annotated_type)[1:] - for annotation in annotations: - if isinstance(annotation, PropertyInfo) and annotation.alias is not None: - return annotation.alias - - return key - - -def _transform_recursive( - data: object, - *, - annotation: type, - inner_type: type | None = None, -) -> object: - """Transform the given data against the expected type. - - Args: - annotation: The direct type annotation given to the particular piece of data. - This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc - - inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type - is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in - the list can be transformed using the metadata from the container type. - - Defaults to the same value as the `annotation` argument. - """ - if inner_type is None: - inner_type = annotation - - stripped_type = strip_annotated_type(inner_type) - if is_typeddict(stripped_type) and is_mapping(data): - return _transform_typeddict(data, stripped_type) - - if ( - # List[T] - (is_list_type(stripped_type) and is_list(data)) - # Iterable[T] - or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) - ): - inner_type = extract_type_arg(stripped_type, 0) - return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] - - if is_union_type(stripped_type): - # For union types we run the transformation against all subtypes to ensure that everything is transformed. - # - # TODO: there may be edge cases where the same normalized field name will transform to two different names - # in different subtypes. 
- for subtype in get_args(stripped_type): - data = _transform_recursive(data, annotation=annotation, inner_type=subtype) - return data - - if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) - - annotated_type = _get_annotated_type(annotation) - if annotated_type is None: - return data - - # ignore the first argument as it is the actual type - annotations = get_args(annotated_type)[1:] - for annotation in annotations: - if isinstance(annotation, PropertyInfo) and annotation.format is not None: - return _format_data(data, annotation.format, annotation.format_template) - - return data - - -def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: - if isinstance(data, date | datetime): - if format_ == "iso8601": - return data.isoformat() - - if format_ == "custom" and format_template is not None: - return data.strftime(format_template) - - if format_ == "base64" and is_base64_file_input(data): - binary: str | bytes | None = None - - if isinstance(data, pathlib.Path): - binary = data.read_bytes() - elif isinstance(data, io.IOBase): - binary = data.read() - - if isinstance(binary, str): # type: ignore[unreachable] - binary = binary.encode() - - if not isinstance(binary, bytes): - raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") - - return base64.b64encode(binary).decode("ascii") - - return data - - -def _transform_typeddict( - data: Mapping[str, object], - expected_type: type, -) -> Mapping[str, object]: - result: dict[str, object] = {} - annotations = get_type_hints(expected_type, include_extras=True) - for key, value in data.items(): - type_ = annotations.get(key) - if type_ is None: - # we do not have a type annotation for this field, leave it as is - result[key] = value - else: - result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_) - return result - - -async def async_maybe_transform( - data: object, - expected_type: object, -) -> Any | None: - """Wrapper over `async_transform()` that allows `None` to be passed. - - See `async_transform()` for more details. - """ - if data is None: - return None - return await async_transform(data, expected_type) - - -async def async_transform( - data: _T, - expected_type: object, -) -> _T: - """Transform dictionaries based off of type information from the given type, for example: - - ```py - class Params(TypedDict, total=False): - card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] - - - transformed = await async_transform({"card_id": ""}, Params) - # {'cardID': ''} - ``` - - Any keys / data that do not have type information given will be included as is. - - It should be noted that the transformations that this function does are not represented in the type system. - """ - transformed = await _async_transform_recursive(data, annotation=cast(type, expected_type)) - return cast(_T, transformed) - - -async def _async_transform_recursive( - data: object, - *, - annotation: type, - inner_type: type | None = None, -) -> object: - """Transform the given data against the expected type. - - Args: - annotation: The direct type annotation given to the particular piece of data. - This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc - - inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type - is a container type such as `List[T]`.
In that case `inner_type` should be set to `T` so that each entry in - the list can be transformed using the metadata from the container type. - - Defaults to the same value as the `annotation` argument. - """ - if inner_type is None: - inner_type = annotation - - stripped_type = strip_annotated_type(inner_type) - if is_typeddict(stripped_type) and is_mapping(data): - return await _async_transform_typeddict(data, stripped_type) - - if ( - # List[T] - (is_list_type(stripped_type) and is_list(data)) - # Iterable[T] - or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) - ): - inner_type = extract_type_arg(stripped_type, 0) - return [await _async_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] - - if is_union_type(stripped_type): - # For union types we run the transformation against all subtypes to ensure that everything is transformed. - # - # TODO: there may be edge cases where the same normalized field name will transform to two different names - # in different subtypes. - for subtype in get_args(stripped_type): - data = await _async_transform_recursive(data, annotation=annotation, inner_type=subtype) - return data - - if isinstance(data, pydantic.BaseModel): - return model_dump(data, exclude_unset=True) - - annotated_type = _get_annotated_type(annotation) - if annotated_type is None: - return data - - # ignore the first argument as it is the actual type - annotations = get_args(annotated_type)[1:] - for annotation in annotations: - if isinstance(annotation, PropertyInfo) and annotation.format is not None: - return await _async_format_data(data, annotation.format, annotation.format_template) - - return data - - -async def _async_format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: - if isinstance(data, date | datetime): - if format_ == "iso8601": - return data.isoformat() - - if format_ == "custom" and format_template is not None: - return data.strftime(format_template) - - if format_ == "base64" and is_base64_file_input(data): - binary: str | bytes | None = None - - if isinstance(data, pathlib.Path): - binary = await anyio.Path(data).read_bytes() - elif isinstance(data, io.IOBase): - binary = data.read() - - if isinstance(binary, str): # type: ignore[unreachable] - binary = binary.encode() - - if not isinstance(binary, bytes): - raise RuntimeError(f"Could not read bytes from {data}; Received {type(binary)}") - - return base64.b64encode(binary).decode("ascii") - - return data - - -async def _async_transform_typeddict( - data: Mapping[str, object], - expected_type: type, -) -> Mapping[str, object]: - result: dict[str, object] = {} - annotations = get_type_hints(expected_type, include_extras=True) - for key, value in data.items(): - type_ = annotations.get(key) - if type_ is None: - # we do not have a type annotation for this field, leave it as is - result[key] = value - else: - result[_maybe_transform_key(key, type_)] = await _async_transform_recursive(value, annotation=type_) - return result diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py deleted file mode 100644 index c7c54dcc37..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_typing.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations - -from collections import abc as _c_abc -from collections.abc import Iterable -from typing import Annotated, 
Any, TypeVar, cast, get_args, get_origin - -from typing_extensions import Required - -from .._base_compat import is_union as _is_union -from .._base_type import InheritsGeneric - - -def is_annotated_type(typ: type) -> bool: - return get_origin(typ) == Annotated - - -def is_list_type(typ: type) -> bool: - return (get_origin(typ) or typ) == list - - -def is_iterable_type(typ: type) -> bool: - """If the given type is `typing.Iterable[T]`""" - origin = get_origin(typ) or typ - return origin in {Iterable, _c_abc.Iterable} - - -def is_union_type(typ: type) -> bool: - return _is_union(get_origin(typ)) - - -def is_required_type(typ: type) -> bool: - return get_origin(typ) == Required - - -def is_typevar(typ: type) -> bool: - # type ignore is required because type checkers - # think this expression will always return False - return type(typ) == TypeVar # type: ignore - - -# Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]] -def strip_annotated_type(typ: type) -> type: - if is_required_type(typ) or is_annotated_type(typ): - return strip_annotated_type(cast(type, get_args(typ)[0])) - - return typ - - -def extract_type_arg(typ: type, index: int) -> type: - args = get_args(typ) - try: - return cast(type, args[index]) - except IndexError as err: - raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err - - -def extract_type_var_from_base( - typ: type, - *, - generic_bases: tuple[type, ...], - index: int, - failure_message: str | None = None, -) -> type: - """Given a type like `Foo[T]`, returns the generic type variable `T`. - - This also handles the case where a concrete subclass is given, e.g. - ```py - class MyResponse(Foo[bytes]): - ... - - extract_type_var_from_base(MyResponse, generic_bases=(Foo,), index=0) -> bytes - ``` - - And where a generic subclass is given: - ```py - _T = TypeVar('_T') - class MyResponse(Foo[_T]): - ... - - extract_type_var_from_base(MyResponse[bytes], generic_bases=(Foo,), index=0) -> bytes - ``` - """ - cls = cast(object, get_origin(typ) or typ) - if cls in generic_bases: - # we're given the class directly - return extract_type_arg(typ, index) - - # if a subclass is given - # --- - # this is needed as __orig_bases__ is not present in the typeshed stubs - # because it is intended to be for internal use only, however there does - # not seem to be a way to resolve generic TypeVars for inherited subclasses - # without using it. - if isinstance(cls, InheritsGeneric): - target_base_class: Any | None = None - for base in cls.__orig_bases__: - if base.__origin__ in generic_bases: - target_base_class = base - break - - if target_base_class is None: - raise RuntimeError( - "Could not find the generic base class;\n" - "This should never happen;\n" - f"Does {cls} inherit from one of {generic_bases} ?" - ) - - extracted = extract_type_arg(target_base_class, index) - if is_typevar(extracted): - # If the extracted type argument is itself a type variable - # then that means the subclass itself is generic, so we have - # to resolve the type argument from the class itself, not - # the base class. - # - # Note: if there is more than 1 type argument, the subclass could - # change the ordering of the type arguments, this is not currently - # supported.
- return extract_type_arg(typ, index) - - return extracted - - raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}") diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py deleted file mode 100644 index ce5e7786aa..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_utils/_utils.py +++ /dev/null @@ -1,409 +0,0 @@ -from __future__ import annotations - -import functools -import inspect -import os -import re -from collections.abc import Callable, Iterable, Mapping, Sequence -from pathlib import Path -from typing import ( - Any, - TypeGuard, - TypeVar, - Union, - cast, - overload, -) - -import sniffio - -from .._base_compat import parse_date as parse_date # noqa: PLC0414 -from .._base_compat import parse_datetime as parse_datetime # noqa: PLC0414 -from .._base_type import FileTypes, Headers, HeadersLike, NotGiven, NotGivenOr - - -def remove_notgiven_indict(obj): - if obj is None or (not isinstance(obj, Mapping)): - return obj - return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} - - -_T = TypeVar("_T") -_TupleT = TypeVar("_TupleT", bound=tuple[object, ...]) -_MappingT = TypeVar("_MappingT", bound=Mapping[str, object]) -_SequenceT = TypeVar("_SequenceT", bound=Sequence[object]) -CallableT = TypeVar("CallableT", bound=Callable[..., Any]) - - -def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: - return [item for sublist in t for item in sublist] - - -def extract_files( - # TODO: this needs to take Dict but variance issues..... - # create protocol type ? - query: Mapping[str, object], - *, - paths: Sequence[Sequence[str]], -) -> list[tuple[str, FileTypes]]: - """Recursively extract files from the given dictionary based on specified paths. - - A path may look like this ['foo', 'files', '', 'data']. - - Note: this mutates the given dictionary. - """ - files: list[tuple[str, FileTypes]] = [] - for path in paths: - files.extend(_extract_items(query, path, index=0, flattened_key=None)) - return files - - -def _extract_items( - obj: object, - path: Sequence[str], - *, - index: int, - flattened_key: str | None, -) -> list[tuple[str, FileTypes]]: - try: - key = path[index] - except IndexError: - if isinstance(obj, NotGiven): - # no value was provided - we can safely ignore - return [] - - # cyclical import - from .._files import assert_is_file_content - - # We have exhausted the path, return the entry we found. - assert_is_file_content(obj, key=flattened_key) - assert flattened_key is not None - return [(flattened_key, cast(FileTypes, obj))] - - index += 1 - if is_dict(obj): - try: - # We are at the last entry in the path so we must remove the field - if (len(path)) == index: - item = obj.pop(key) - else: - item = obj[key] - except KeyError: - # Key was not present in the dictionary, this is not indicative of an error - # as the given path may not point to a required field. We also do not want - # to enforce required fields as the API may differ from the spec in some cases. 
- return [] - if flattened_key is None: - flattened_key = key - else: - flattened_key += f"[{key}]" - return _extract_items( - item, - path, - index=index, - flattened_key=flattened_key, - ) - elif is_list(obj): - if key != "": - return [] - - return flatten( - [ - _extract_items( - item, - path, - index=index, - flattened_key=flattened_key + "[]" if flattened_key is not None else "[]", - ) - for item in obj - ] - ) - - # Something unexpected was passed, just ignore it. - return [] - - -def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]: - return not isinstance(obj, NotGiven) - - -# Type safe methods for narrowing types with TypeVars. -# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown], -# however this causes Pyright to rightfully report errors. As we know we don't -# care about the contained types we can safely use `object` in its place. -# -# There are two separate functions defined, `is_*` and `is_*_t` for different use cases. -# `is_*` is for when you're dealing with an unknown input -# `is_*_t` is for when you're narrowing a known union type to a specific subset - - -def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]: - return isinstance(obj, tuple) - - -def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]: - return isinstance(obj, tuple) - - -def is_sequence(obj: object) -> TypeGuard[Sequence[object]]: - return isinstance(obj, Sequence) - - -def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]: - return isinstance(obj, Sequence) - - -def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]: - return isinstance(obj, Mapping) - - -def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]: - return isinstance(obj, Mapping) - - -def is_dict(obj: object) -> TypeGuard[dict[object, object]]: - return isinstance(obj, dict) - - -def is_list(obj: object) -> TypeGuard[list[object]]: - return isinstance(obj, list) - - -def is_iterable(obj: object) -> TypeGuard[Iterable[object]]: - return isinstance(obj, Iterable) - - -def deepcopy_minimal(item: _T) -> _T: - """Minimal reimplementation of copy.deepcopy() that will only copy certain object types: - - - mappings, e.g. `dict` - - list - - This is done for performance reasons. - """ - if is_mapping(item): - return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()}) - if is_list(item): - return cast(_T, [deepcopy_minimal(entry) for entry in item]) - return item - - -# copied from https://github.com/Rapptz/RoboDanny -def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str: - size = len(seq) - if size == 0: - return "" - - if size == 1: - return seq[0] - - if size == 2: - return f"{seq[0]} {final} {seq[1]}" - - return delim.join(seq[:-1]) + f" {final} {seq[-1]}" - - -def quote(string: str) -> str: - """Add single quotation marks around the given string. Does *not* do any escaping.""" - return f"'{string}'" - - -def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]: - """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function. - - Useful for enforcing runtime validation of overloaded functions. - - Example usage: - ```py - @overload - def foo(*, a: str) -> str: - ... - - - @overload - def foo(*, b: bool) -> str: - ... - - - # This enforces the same constraints that a static type checker would - # i.e. that either a or b must be passed to the function - @required_args(["a"], ["b"]) - def foo(*, a: str | None = None, b: bool | None = None) -> str: - ...
- ``` - """ - - def inner(func: CallableT) -> CallableT: - params = inspect.signature(func).parameters - positional = [ - name - for name, param in params.items() - if param.kind - in { - param.POSITIONAL_ONLY, - param.POSITIONAL_OR_KEYWORD, - } - ] - - @functools.wraps(func) - def wrapper(*args: object, **kwargs: object) -> object: - given_params: set[str] = set() - for i, _ in enumerate(args): - try: - given_params.add(positional[i]) - except IndexError: - raise TypeError( - f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given" - ) from None - - given_params.update(kwargs.keys()) - - for variant in variants: - matches = all(param in given_params for param in variant) - if matches: - break - else: # no break - if len(variants) > 1: - variations = human_join( - ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants] - ) - msg = f"Missing required arguments; Expected either {variations} arguments to be given" - else: - # TODO: this error message is not deterministic - missing = list(set(variants[0]) - given_params) - if len(missing) > 1: - msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}" - else: - msg = f"Missing required argument: {quote(missing[0])}" - raise TypeError(msg) - return func(*args, **kwargs) - - return wrapper # type: ignore - - return inner - - -_K = TypeVar("_K") -_V = TypeVar("_V") - - -@overload -def strip_not_given(obj: None) -> None: ... - - -@overload -def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... - - -@overload -def strip_not_given(obj: object) -> object: ... - - -def strip_not_given(obj: object | None) -> object: - """Remove all top-level keys where their values are instances of `NotGiven`""" - if obj is None: - return None - - if not is_mapping(obj): - return obj - - return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} - - -def coerce_integer(val: str) -> int: - return int(val, base=10) - - -def coerce_float(val: str) -> float: - return float(val) - - -def coerce_boolean(val: str) -> bool: - return val in {"true", "1", "on"} - - -def maybe_coerce_integer(val: str | None) -> int | None: - if val is None: - return None - return coerce_integer(val) - - -def maybe_coerce_float(val: str | None) -> float | None: - if val is None: - return None - return coerce_float(val) - - -def maybe_coerce_boolean(val: str | None) -> bool | None: - if val is None: - return None - return coerce_boolean(val) - - -def removeprefix(string: str, prefix: str) -> str: - """Remove a prefix from a string. - - Backport of `str.removeprefix` for Python < 3.9 - """ - if string.startswith(prefix): - return string[len(prefix) :] - return string - - -def removesuffix(string: str, suffix: str) -> str: - """Remove a suffix from a string. 
- - Backport of `str.removesuffix` for Python < 3.9 - """ - if string.endswith(suffix): - return string[: -len(suffix)] - return string - - -def file_from_path(path: str) -> FileTypes: - contents = Path(path).read_bytes() - file_name = os.path.basename(path) - return (file_name, contents) - - -def get_required_header(headers: HeadersLike, header: str) -> str: - lower_header = header.lower() - if isinstance(headers, Mapping): - headers = cast(Headers, headers) - for k, v in headers.items(): - if k.lower() == lower_header and isinstance(v, str): - return v - - """ to deal with the case where the header looks like Stainless-Event-Id """ - intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) - - for normalized_header in [header, lower_header, header.upper(), intercaps_header]: - value = headers.get(normalized_header) - if value: - return value - - raise ValueError(f"Could not find {header} header") - - -def get_async_library() -> str: - try: - return sniffio.current_async_library() - except Exception: - return "false" - - -def drop_prefix_image_data(content: Union[str, list[dict]]) -> Union[str, list[dict]]: - """ - Remove the ;base64, prefix from image data. - :param image_data: - :return: - """ - if isinstance(content, list): - for data in content: - if data.get("type") == "image_url": - image_data = data.get("image_url").get("url") - if image_data.startswith("data:image/"): - image_data = image_data.split("base64,")[-1] - data["image_url"]["url"] = image_data - - return content diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py deleted file mode 100644 index e5fce94c00..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/logs.py +++ /dev/null @@ -1,78 +0,0 @@ -import logging -import os -import time - -logger = logging.getLogger(__name__) - - -class LoggerNameFilter(logging.Filter): - def filter(self, record): - # return record.name.startswith("loom_core") or record.name in "ERROR" or ( - # record.name.startswith("uvicorn.error") - # and record.getMessage().startswith("Uvicorn running on") - # ) - return True - - -def get_log_file(log_path: str, sub_dir: str): - """ - sub_dir should contain a timestamp. - """ - log_dir = os.path.join(log_path, sub_dir) - # A new directory should be created each time, so `exist_ok=False` - os.makedirs(log_dir, exist_ok=False) - return os.path.join(log_dir, "zhipuai.log") - - -def get_config_dict(log_level: str, log_file_path: str, log_backup_count: int, log_max_bytes: int) -> dict: - # On Windows, the path should be a raw string.
- log_file_path = log_file_path.encode("unicode-escape").decode() if os.name == "nt" else log_file_path - log_level = log_level.upper() - config_dict = { - "version": 1, - "disable_existing_loggers": False, - "formatters": { - "formatter": {"format": ("%(asctime)s %(name)-12s %(process)d %(levelname)-8s %(message)s")}, - }, - "filters": { - "logger_name_filter": { - "()": __name__ + ".LoggerNameFilter", - }, - }, - "handlers": { - "stream_handler": { - "class": "logging.StreamHandler", - "formatter": "formatter", - "level": log_level, - # "stream": "ext://sys.stdout", - # "filters": ["logger_name_filter"], - }, - "file_handler": { - "class": "logging.handlers.RotatingFileHandler", - "formatter": "formatter", - "level": log_level, - "filename": log_file_path, - "mode": "a", - "maxBytes": log_max_bytes, - "backupCount": log_backup_count, - "encoding": "utf8", - }, - }, - "loggers": { - "loom_core": { - "handlers": ["stream_handler", "file_handler"], - "level": log_level, - "propagate": False, - } - }, - "root": { - "level": log_level, - "handlers": ["stream_handler", "file_handler"], - }, - } - return config_dict - - -def get_timestamp_ms(): - t = time.time() - return int(round(t * 1000)) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py deleted file mode 100644 index 7f0b1b91d9..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/pagination.py +++ /dev/null @@ -1,62 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Any, Generic, Optional, TypeVar, cast - -from typing_extensions import Protocol, override, runtime_checkable - -from ._http_client import BasePage, BaseSyncPage, PageInfo - -__all__ = ["SyncPage", "SyncCursorPage"] - -_T = TypeVar("_T") - - -@runtime_checkable -class CursorPageItem(Protocol): - id: Optional[str] - - -class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): - """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" - - data: list[_T] - object: str - - @override - def _get_page_items(self) -> list[_T]: - data = self.data - if not data: - return [] - return data - - @override - def next_page_info(self) -> None: - """ - This page represents a response that isn't actually paginated at the API level - so there will never be a next page. 
- """ - return None - - -class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): - data: list[_T] - - @override - def _get_page_items(self) -> list[_T]: - data = self.data - if not data: - return [] - return data - - @override - def next_page_info(self) -> Optional[PageInfo]: - data = self.data - if not data: - return None - - item = cast(Any, data[-1]) - if not isinstance(item, CursorPageItem) or item.id is None: - # TODO emit warning log - return None - - return PageInfo(params={"after": item.id}) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py deleted file mode 100644 index 9f941fb91c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .assistant_completion import AssistantCompletion - -__all__ = [ - "AssistantCompletion", -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py deleted file mode 100644 index cbfb6edaeb..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_completion.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import Any, Optional - -from ...core import BaseModel -from .message import MessageContent - -__all__ = ["AssistantCompletion", "CompletionUsage"] - - -class ErrorInfo(BaseModel): - code: str # 错误码 - message: str # 错误信息 - - -class AssistantChoice(BaseModel): - index: int # 结果下标 - delta: MessageContent # 当前会话输出消息体 - finish_reason: str - """ - # 推理结束原因 stop代表推理自然结束或触发停止词。 sensitive 代表模型推理内容被安全审核接口拦截。请注意,针对此类内容,请用户自行判断并决定是否撤回已公开的内容。 - # network_error 代表模型推理服务异常。 - """ # noqa: E501 - metadata: dict # 元信息,拓展字段 - - -class CompletionUsage(BaseModel): - prompt_tokens: int # 输入的 tokens 数量 - completion_tokens: int # 输出的 tokens 数量 - total_tokens: int # 总 tokens 数量 - - -class AssistantCompletion(BaseModel): - id: str # 请求 ID - conversation_id: str # 会话 ID - assistant_id: str # 智能体 ID - created: int # 请求创建时间,Unix 时间戳 - status: str # 返回状态,包括:`completed` 表示生成结束`in_progress`表示生成中 `failed` 表示生成异常 - last_error: Optional[ErrorInfo] # 异常信息 - choices: list[AssistantChoice] # 增量返回的信息 - metadata: Optional[dict[str, Any]] # 元信息,拓展字段 - usage: Optional[CompletionUsage] # tokens 数量统计 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py deleted file mode 100644 index 03f14f4238..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_params.py +++ /dev/null @@ -1,7 +0,0 @@ -from typing import TypedDict - - -class ConversationParameters(TypedDict, total=False): - assistant_id: str # 智能体 ID - page: int # 当前分页 - page_size: int # 分页数量 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py deleted file mode 100644 index 
d1833d220a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_conversation_resp.py +++ /dev/null @@ -1,29 +0,0 @@ -from ...core import BaseModel - -__all__ = ["ConversationUsageListResp"] - - -class Usage(BaseModel): - prompt_tokens: int # Number of tokens in the user input - completion_tokens: int # Number of tokens generated by the model - total_tokens: int # Total number of tokens - - -class ConversationUsage(BaseModel): - id: str # Conversation ID - assistant_id: str # Assistant ID - create_time: int # Creation time - update_time: int # Update time - usage: Usage # Token usage statistics for the conversation - - -class ConversationUsageList(BaseModel): - assistant_id: str # Assistant ID - has_more: bool # Whether more pages exist - conversation_list: list[ConversationUsage] # The returned conversation usage list - - -class ConversationUsageListResp(BaseModel): - code: int - msg: str - data: ConversationUsageList diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py deleted file mode 100644 index 2def1025cd..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_create_params.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Optional, TypedDict, Union - - -class AssistantAttachments: - file_id: str - - -class MessageTextContent: - type: str # Currently only type = text is supported - text: str - - -MessageContent = Union[MessageTextContent] - - -class ConversationMessage(TypedDict): - """Conversation message body""" - - role: str # Input role of the user, e.g. 'user' - content: list[MessageContent] # Content of the conversation message - - -class AssistantParameters(TypedDict, total=False): - """Assistant parameter class""" - - assistant_id: str # Assistant ID - conversation_id: Optional[str] # Conversation ID; if omitted, a new conversation is created - model: str # Model name, defaults to 'GLM-4-Assistant' - stream: bool # Whether to stream via SSE; must be set to True - messages: list[ConversationMessage] # Conversation message body - attachments: Optional[list[AssistantAttachments]] # Files attached to the conversation, optional - metadata: Optional[dict] # Metadata, extension field, optional diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py deleted file mode 100644 index 0709cdbcad..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/assistant_support_resp.py +++ /dev/null @@ -1,21 +0,0 @@ -from ...core import BaseModel - -__all__ = ["AssistantSupportResp"] - - -class AssistantSupport(BaseModel): - assistant_id: str # Assistant ID, used for assistant conversations - created_at: int # Creation time - updated_at: int # Update time - name: str # Assistant name - avatar: str # Assistant avatar - description: str # Assistant description - status: str # Assistant status; currently only publish - tools: list[str] # Names of the tools the assistant supports - starter_prompts: list[str] # Recommended starter prompts for the assistant - - -class AssistantSupportResp(BaseModel): - code: int - msg: str - data: list[AssistantSupport] # Assistant list diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py deleted file mode 100644 index 562e0151e5..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .message_content import MessageContent - -__all__ = ["MessageContent"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py deleted file mode 100644 index 6a1a438a6f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/message_content.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Annotated, TypeAlias, Union - -from ....core._utils import PropertyInfo -from .text_content_block import TextContentBlock -from .tools_delta_block import ToolsDeltaBlock - -__all__ = ["MessageContent"] - - -MessageContent: TypeAlias = Annotated[ - Union[ToolsDeltaBlock, TextContentBlock], - PropertyInfo(discriminator="type"), -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py deleted file mode 100644 index 865fb1139e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/text_content_block.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Literal - -from ....core import BaseModel - -__all__ = ["TextContentBlock"] - - -class TextContentBlock(BaseModel): - content: str - - role: str = "assistant" - - type: Literal["content"] = "content" - """Always `content`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py deleted file mode 100644 index 9d569b282e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/code_interpreter_delta_block.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Literal - -__all__ = ["CodeInterpreterToolBlock"] - -from .....core import BaseModel - - -class CodeInterpreterToolOutput(BaseModel): - """Output of the code interpreter tool""" - - type: str # Type of the code-execution output; currently only logs - logs: str # Log output from executing the code - error_msg: str # Error message - - -class CodeInterpreter(BaseModel): - """Code interpreter""" - - input: str # Generated code snippet, fed into the code sandbox - outputs: list[CodeInterpreterToolOutput] # Outputs produced by executing the code - - -class CodeInterpreterToolBlock(BaseModel): - """Code interpreter tool block""" - - code_interpreter: CodeInterpreter # Code interpreter object - type: Literal["code_interpreter"] # Type of the invoked tool, always `code_interpreter` diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py deleted file mode 100644 index 0b6895556b..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/drawing_tool_delta_block.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Literal - -from .....core import BaseModel - -__all__ = ["DrawingToolBlock"] - - -class DrawingToolOutput(BaseModel): - image: str - - -class DrawingTool(BaseModel): - input: str - outputs: list[DrawingToolOutput] - - -class DrawingToolBlock(BaseModel): - drawing_tool: DrawingTool - - type: Literal["drawing_tool"] - """Always `drawing_tool`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py deleted file mode 100644 index c439bc4b3f..0000000000 ---
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/function_delta_block.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Literal, Union - -__all__ = ["FunctionToolBlock"] - -from .....core import BaseModel - - -class FunctionToolOutput(BaseModel): - content: str - - -class FunctionTool(BaseModel): - name: str - arguments: Union[str, dict] - outputs: list[FunctionToolOutput] - - -class FunctionToolBlock(BaseModel): - function: FunctionTool - - type: Literal["function"] - """Always `function`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py deleted file mode 100644 index 4789e9378a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/retrieval_delta_black.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import Literal - -from .....core import BaseModel - - -class RetrievalToolOutput(BaseModel): - """ - This class represents the output of a retrieval tool. - - Attributes: - - text (str): The text snippet retrieved from the knowledge base. - - document (str): The name of the document from which the text snippet was retrieved, returned only in intelligent configuration. - """ # noqa: E501 - - text: str - document: str - - -class RetrievalTool(BaseModel): - """ - This class represents the outputs of a retrieval tool. - - Attributes: - - outputs (List[RetrievalToolOutput]): A list of text snippets and their respective document names retrieved from the knowledge base. - """ # noqa: E501 - - outputs: list[RetrievalToolOutput] - - -class RetrievalToolBlock(BaseModel): - """ - This class represents a block for invoking the retrieval tool. - - Attributes: - - retrieval (RetrievalTool): An instance of the RetrievalTool class containing the retrieval outputs. - - type (Literal["retrieval"]): The type of tool being used, always set to "retrieval".
- """ - - retrieval: RetrievalTool - type: Literal["retrieval"] - """Always `retrieval`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py deleted file mode 100644 index 98544053d4..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/tools_type.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Annotated, TypeAlias, Union - -from .....core._utils import PropertyInfo -from .code_interpreter_delta_block import CodeInterpreterToolBlock -from .drawing_tool_delta_block import DrawingToolBlock -from .function_delta_block import FunctionToolBlock -from .retrieval_delta_black import RetrievalToolBlock -from .web_browser_delta_block import WebBrowserToolBlock - -__all__ = ["ToolsType"] - - -ToolsType: TypeAlias = Annotated[ - Union[DrawingToolBlock, CodeInterpreterToolBlock, WebBrowserToolBlock, RetrievalToolBlock, FunctionToolBlock], - PropertyInfo(discriminator="type"), -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py deleted file mode 100644 index 966e6fe0c8..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools/web_browser_delta_block.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Literal - -from .....core import BaseModel - -__all__ = ["WebBrowserToolBlock"] - - -class WebBrowserOutput(BaseModel): - """ - This class represents the output of a web browser search result. - - Attributes: - - title (str): The title of the search result. - - link (str): The URL link to the search result's webpage. - - content (str): The textual content extracted from the search result. - - error_msg (str): Any error message encountered during the search or retrieval process. - """ - - title: str - link: str - content: str - error_msg: str - - -class WebBrowser(BaseModel): - """ - This class represents the input and outputs of a web browser search. - - Attributes: - - input (str): The input query for the web browser search. - - outputs (List[WebBrowserOutput]): A list of search results returned by the web browser. - """ - - input: str - outputs: list[WebBrowserOutput] - - -class WebBrowserToolBlock(BaseModel): - """ - This class represents a block for invoking the web browser tool. - - Attributes: - - web_browser (WebBrowser): An instance of the WebBrowser class containing the search input and outputs. - - type (Literal["web_browser"]): The type of tool being used, always set to "web_browser". 
- """ - - web_browser: WebBrowser - type: Literal["web_browser"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py deleted file mode 100644 index 781a1ab819..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/assistant/message/tools_delta_block.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Literal - -from ....core import BaseModel -from .tools.tools_type import ToolsType - -__all__ = ["ToolsDeltaBlock"] - - -class ToolsDeltaBlock(BaseModel): - tool_calls: list[ToolsType] - """The index of the content part in the message.""" - - role: str = "tool" - - type: Literal["tool_calls"] = "tool_calls" - """Always `tool_calls`.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py deleted file mode 100644 index 560562915c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch.py +++ /dev/null @@ -1,82 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -import builtins -from typing import Literal, Optional - -from ..core import BaseModel -from .batch_error import BatchError -from .batch_request_counts import BatchRequestCounts - -__all__ = ["Batch", "Errors"] - - -class Errors(BaseModel): - data: Optional[list[BatchError]] = None - - object: Optional[str] = None - """这个类型,一直是`list`。""" - - -class Batch(BaseModel): - id: str - - completion_window: str - """用于执行请求的地址信息。""" - - created_at: int - """这是 Unix timestamp (in seconds) 表示的创建时间。""" - - endpoint: str - """这是ZhipuAI endpoint的地址。""" - - input_file_id: str - """标记为batch的输入文件的ID。""" - - object: Literal["batch"] - """这个类型,一直是`batch`.""" - - status: Literal[ - "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" - ] - """batch 的状态。""" - - cancelled_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的取消时间。""" - - cancelling_at: Optional[int] = None - """Unix timestamp (in seconds) 表示发起取消的请求时间 """ - - completed_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的完成时间。""" - - error_file_id: Optional[str] = None - """这个文件id包含了执行请求失败的请求的输出。""" - - errors: Optional[Errors] = None - - expired_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的将在过期时间。""" - - expires_at: Optional[int] = None - """Unix timestamp (in seconds) 触发过期""" - - failed_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的失败时间。""" - - finalizing_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的最终时间。""" - - in_progress_at: Optional[int] = None - """Unix timestamp (in seconds) 表示的开始处理时间。""" - - metadata: Optional[builtins.object] = None - """ - key:value形式的元数据,以便将信息存储 - 结构化格式。键的长度是64个字符,值最长512个字符 - """ - - output_file_id: Optional[str] = None - """完成请求的输出文件的ID。""" - - request_counts: Optional[BatchRequestCounts] = None - """批次中不同状态的请求计数""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py deleted file mode 100644 index 3dae65ea46..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_create_params.py +++ /dev/null @@ -1,37 +0,0 @@ -from __future__ import annotations - -from typing import Literal, Optional - -from 
typing_extensions import Required, TypedDict - -__all__ = ["BatchCreateParams"] - - -class BatchCreateParams(TypedDict, total=False): - completion_window: Required[str] - """The time frame within which the batch should be processed. - - Currently only `24h` is supported. - """ - - endpoint: Required[Literal["/v1/chat/completions", "/v1/embeddings"]] - """The endpoint to be used for all requests in the batch. - - Currently `/v1/chat/completions` and `/v1/embeddings` are supported. - """ - - input_file_id: Required[str] - """The ID of an uploaded file that contains requests for the new batch. - - See [upload file](https://platform.openai.com/docs/api-reference/files/create) - for how to upload a file. - - Your input file must be formatted as a - [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput), - and must be uploaded with the purpose `batch`. - """ - - metadata: Optional[dict[str, str]] - """Optional custom metadata for the batch.""" - - auto_delete_input_file: Optional[bool] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py deleted file mode 100644 index f934db1978..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_error.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..core import BaseModel - -__all__ = ["BatchError"] - - -class BatchError(BaseModel): - code: Optional[str] = None - """The defined business error code.""" - - line: Optional[int] = None - """The line number in the file.""" - - message: Optional[str] = None - """Description of the error in the conversation file.""" - - param: Optional[str] = None - """The parameter name, if any.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py deleted file mode 100644 index 1a68167132..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_list_params.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["BatchListParams"] - - -class BatchListParams(TypedDict, total=False): - after: str - """Pagination cursor used to fetch the next page of data. - - `after` is a cursor pointing at the current page; it is used to fetch the next page of data. If `after` is not provided, the first page is returned.
- """ - - limit: int - """这个参数用于限制返回的结果数量。 - - Limit 用于限制返回的结果数量。默认值为 10 - """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py deleted file mode 100644 index ca3ccae625..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/batch_request_counts.py +++ /dev/null @@ -1,14 +0,0 @@ -from ..core import BaseModel - -__all__ = ["BatchRequestCounts"] - - -class BatchRequestCounts(BaseModel): - completed: int - """这个数字表示已经完成的请求。""" - - failed: int - """这个数字表示失败的请求。""" - - total: int - """这个数字表示总的请求。""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py deleted file mode 100644 index c1eed070f3..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/async_chat_completion.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional - -from ...core import BaseModel -from .chat_completion import CompletionChoice, CompletionUsage - -__all__ = ["AsyncTaskStatus", "AsyncCompletion"] - - -class AsyncTaskStatus(BaseModel): - id: Optional[str] = None - request_id: Optional[str] = None - model: Optional[str] = None - task_status: Optional[str] = None - - -class AsyncCompletion(BaseModel): - id: Optional[str] = None - request_id: Optional[str] = None - model: Optional[str] = None - task_status: str - choices: list[CompletionChoice] - usage: CompletionUsage diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py deleted file mode 100644 index 1945a826cd..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = ["Completion", "CompletionUsage"] - - -class Function(BaseModel): - arguments: str - name: str - - -class CompletionMessageToolCall(BaseModel): - id: str - function: Function - type: str - - -class CompletionMessage(BaseModel): - content: Optional[str] = None - role: str - tool_calls: Optional[list[CompletionMessageToolCall]] = None - - -class CompletionUsage(BaseModel): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class CompletionChoice(BaseModel): - index: int - finish_reason: str - message: CompletionMessage - - -class Completion(BaseModel): - model: Optional[str] = None - created: Optional[int] = None - choices: list[CompletionChoice] - request_id: Optional[str] = None - id: Optional[str] = None - usage: CompletionUsage diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py deleted file mode 100644 index 27fad0008a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completion_chunk.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import Any, Optional - -from ...core import BaseModel - -__all__ = [ - "CompletionUsage", - 
"ChatCompletionChunk", - "Choice", - "ChoiceDelta", - "ChoiceDeltaFunctionCall", - "ChoiceDeltaToolCall", - "ChoiceDeltaToolCallFunction", -] - - -class ChoiceDeltaFunctionCall(BaseModel): - arguments: Optional[str] = None - name: Optional[str] = None - - -class ChoiceDeltaToolCallFunction(BaseModel): - arguments: Optional[str] = None - name: Optional[str] = None - - -class ChoiceDeltaToolCall(BaseModel): - index: int - id: Optional[str] = None - function: Optional[ChoiceDeltaToolCallFunction] = None - type: Optional[str] = None - - -class ChoiceDelta(BaseModel): - content: Optional[str] = None - role: Optional[str] = None - tool_calls: Optional[list[ChoiceDeltaToolCall]] = None - - -class Choice(BaseModel): - delta: ChoiceDelta - finish_reason: Optional[str] = None - index: int - - -class CompletionUsage(BaseModel): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class ChatCompletionChunk(BaseModel): - id: Optional[str] = None - choices: list[Choice] - created: Optional[int] = None - model: Optional[str] = None - usage: Optional[CompletionUsage] = None - extra_json: dict[str, Any] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completions_create_param.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completions_create_param.py deleted file mode 100644 index 6ee4dc4794..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/chat_completions_create_param.py +++ /dev/null @@ -1,8 +0,0 @@ -from typing import Optional - -from typing_extensions import TypedDict - - -class Reference(TypedDict, total=False): - enable: Optional[bool] - search_query: Optional[str] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py deleted file mode 100644 index 666b38855c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/chat/code_geex/code_geex_params.py +++ /dev/null @@ -1,146 +0,0 @@ -from typing import Literal, Optional - -from typing_extensions import Required, TypedDict - -__all__ = [ - "CodeGeexTarget", - "CodeGeexContext", - "CodeGeexExtra", -] - - -class CodeGeexTarget(TypedDict, total=False): - """补全的内容参数""" - - path: Optional[str] - """文件路径""" - language: Required[ - Literal[ - "c", - "c++", - "cpp", - "c#", - "csharp", - "c-sharp", - "css", - "cuda", - "dart", - "lua", - "objectivec", - "objective-c", - "objective-c++", - "python", - "perl", - "prolog", - "swift", - "lisp", - "java", - "scala", - "tex", - "jsx", - "tsx", - "vue", - "markdown", - "html", - "php", - "js", - "javascript", - "typescript", - "go", - "shell", - "rust", - "sql", - "kotlin", - "vb", - "ruby", - "pascal", - "r", - "fortran", - "lean", - "matlab", - "delphi", - "scheme", - "basic", - "assembly", - "groovy", - "abap", - "gdscript", - "haskell", - "julia", - "elixir", - "excel", - "clojure", - "actionscript", - "solidity", - "powershell", - "erlang", - "cobol", - "alloy", - "awk", - "thrift", - "sparql", - "augeas", - "cmake", - "f-sharp", - "stan", - "isabelle", - "dockerfile", - "rmarkdown", - "literate-agda", - "tcl", - "glsl", - "antlr", - "verilog", - "racket", - "standard-ml", - "elm", - "yaml", - "smalltalk", - "ocaml", - "idris", - "visual-basic", - "protocol-buffer", - "bluespec", - "applescript", - "makefile", - "tcsh", - "maple", - "systemverilog", - "literate-coffeescript", - "vhdl", - 
"restructuredtext", - "sas", - "literate-haskell", - "java-server-pages", - "coffeescript", - "emacs-lisp", - "mathematica", - "xslt", - "zig", - "common-lisp", - "stata", - "agda", - "ada", - ] - ] - """代码语言类型,如python""" - code_prefix: Required[str] - """补全位置的前文""" - code_suffix: Required[str] - """补全位置的后文""" - - -class CodeGeexContext(TypedDict, total=False): - """附加代码""" - - path: Required[str] - """附加代码文件的路径""" - code: Required[str] - """附加的代码内容""" - - -class CodeGeexExtra(TypedDict, total=False): - target: Required[CodeGeexTarget] - """补全的内容参数""" - contexts: Optional[list[CodeGeexContext]] - """附加代码""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py deleted file mode 100644 index 8425b5c866..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/embeddings.py +++ /dev/null @@ -1,21 +0,0 @@ -from __future__ import annotations - -from typing import Optional - -from ..core import BaseModel -from .chat.chat_completion import CompletionUsage - -__all__ = ["Embedding", "EmbeddingsResponded"] - - -class Embedding(BaseModel): - object: str - index: Optional[int] = None - embedding: list[float] - - -class EmbeddingsResponded(BaseModel): - object: str - data: list[Embedding] - model: str - usage: CompletionUsage diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py deleted file mode 100644 index bbaf59e4d7..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .file_deleted import FileDeleted -from .file_object import FileObject, ListOfFileObject -from .upload_detail import UploadDetail - -__all__ = ["FileObject", "ListOfFileObject", "UploadDetail", "FileDeleted"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py deleted file mode 100644 index 4ef93b1c05..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_create_params.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import annotations - -from typing import Literal, Optional - -from typing_extensions import Required, TypedDict - -__all__ = ["FileCreateParams"] - -from ...core import FileTypes -from . 
import UploadDetail - - -class FileCreateParams(TypedDict, total=False): - file: FileTypes - """Exactly one of file or upload_detail is required.""" - - upload_detail: list[UploadDetail] - """Exactly one of file or upload_detail is required.""" - - purpose: Required[Literal["fine-tune", "retrieval", "batch"]] - """ - Purpose of the uploaded file; "fine-tune" and "retrieval" are supported. - retrieval supports Doc, Docx, PDF, Xlsx and URL files; a single file may not exceed 5 MB. - fine-tune supports .jsonl files; a single file may currently be up to 100 MB, and the corpus format must follow the fine-tuning guide. - """ - custom_separator: Optional[list[str]] - """ - Applies when purpose is retrieval and the file type is pdf, url or docx; the slicing separator defaults to `\n`. - """ - knowledge_id: str - """ - When the upload purpose is retrieval, the target knowledge base ID must be specified. - """ - - sentence_size: int - """ - Slice size to use when the upload purpose is retrieval. - """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py deleted file mode 100644 index a384b1a69a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_deleted.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Literal - -from ...core import BaseModel - -__all__ = ["FileDeleted"] - - -class FileDeleted(BaseModel): - id: str - - deleted: bool - - object: Literal["file"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py deleted file mode 100644 index 8f9d0fbb8e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/file_object.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = ["FileObject", "ListOfFileObject"] - - -class FileObject(BaseModel): - id: Optional[str] = None - bytes: Optional[int] = None - created_at: Optional[int] = None - filename: Optional[str] = None - object: Optional[str] = None - purpose: Optional[str] = None - status: Optional[str] = None - status_details: Optional[str] = None - - -class ListOfFileObject(BaseModel): - object: Optional[str] = None - data: list[FileObject] - has_more: Optional[bool] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py deleted file mode 100644 index 8f1ca5ce57..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/files/upload_detail.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - - -class UploadDetail(BaseModel): - url: str - knowledge_type: int - file_name: Optional[str] = None - sentence_size: Optional[int] = None - custom_separator: Optional[list[str]] = None - callback_url: Optional[str] = None - callback_header: Optional[dict[str, str]] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/__init__.py deleted file mode 100644 index 416f516ef7..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from __future__ import annotations - -from .fine_tuning_job import FineTuningJob, ListOfFineTuningJob -from .fine_tuning_job_event import FineTuningJobEvent diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py deleted file mode 100644 index 75c7553dbe..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing import Optional, Union - -from ...core import BaseModel - -__all__ = ["FineTuningJob", "Error", "Hyperparameters", "ListOfFineTuningJob"] - - -class Error(BaseModel): - code: str - message: str - param: Optional[str] = None - - -class Hyperparameters(BaseModel): - n_epochs: Union[str, int, None] = None - - -class FineTuningJob(BaseModel): - id: Optional[str] = None - - request_id: Optional[str] = None - - created_at: Optional[int] = None - - error: Optional[Error] = None - - fine_tuned_model: Optional[str] = None - - finished_at: Optional[int] = None - - hyperparameters: Optional[Hyperparameters] = None - - model: Optional[str] = None - - object: Optional[str] = None - - result_files: list[str] - - status: str - - trained_tokens: Optional[int] = None - - training_file: str - - validation_file: Optional[str] = None - - -class ListOfFineTuningJob(BaseModel): - object: Optional[str] = None - data: list[FineTuningJob] - has_more: Optional[bool] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py deleted file mode 100644 index f996cff114..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/fine_tuning_job_event.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Optional, Union - -from ...core import BaseModel - -__all__ = ["FineTuningJobEvent", "Metric", "JobEvent"] - - -class Metric(BaseModel): - epoch: Optional[Union[str, int, float]] = None - current_steps: Optional[int] = None - total_steps: Optional[int] = None - elapsed_time: Optional[str] = None - remaining_time: Optional[str] = None - trained_tokens: Optional[int] = None - loss: Optional[Union[str, int, float]] = None - eval_loss: Optional[Union[str, int, float]] = None - acc: Optional[Union[str, int, float]] = None - eval_acc: Optional[Union[str, int, float]] = None - learning_rate: Optional[Union[str, int, float]] = None - - -class JobEvent(BaseModel): - object: Optional[str] = None - id: Optional[str] = None - type: Optional[str] = None - created_at: Optional[int] = None - level: Optional[str] = None - message: Optional[str] = None - data: Optional[Metric] = None - - -class FineTuningJobEvent(BaseModel): - object: Optional[str] = None - data: list[JobEvent] - has_more: Optional[bool] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/job_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/job_create_params.py deleted file mode 100644 index e1ebc352bc..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/job_create_params.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - -from typing import Literal, Union - -from typing_extensions import TypedDict - -__all__ = ["Hyperparameters"] - - -class Hyperparameters(TypedDict, total=False): - batch_size: Union[Literal["auto"], int] - - learning_rate_multiplier: Union[Literal["auto"], float] - - n_epochs: Union[Literal["auto"], int] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py deleted file mode 100644 index 57d0d2511d..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .fine_tuned_models import FineTunedModelsStatus diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py deleted file mode 100644 index b286a5b577..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/fine_tuning/models/fine_tuned_models.py +++ /dev/null @@ -1,13 +0,0 @@ -from typing import ClassVar - -from ....core import PYDANTIC_V2, BaseModel, ConfigDict - -__all__ = ["FineTunedModelsStatus"] - - -class FineTunedModelsStatus(BaseModel): - if PYDANTIC_V2: - model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow", protected_namespaces=()) - request_id: str # Request ID - model_name: str # Model name - delete_status: str # Deletion status: deleting (in progress), deleted (done) diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py deleted file mode 100644 index 3bcad0acab..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/image.py +++ /dev/null @@ -1,18 +0,0 @@ -from __future__ import annotations - -from typing import Optional - -from ..core import BaseModel - -__all__ = ["GeneratedImage", "ImagesResponded"] - - -class GeneratedImage(BaseModel): - b64_json: Optional[str] = None - url: Optional[str] = None - revised_prompt: Optional[str] = None - - -class ImagesResponded(BaseModel): - created: int - data: list[GeneratedImage] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py deleted file mode 100644 index 8c81d703e2..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .knowledge import KnowledgeInfo -from .knowledge_used import KnowledgeStatistics, KnowledgeUsed - -__all__ = [ - "KnowledgeInfo", - "KnowledgeStatistics", - "KnowledgeUsed", -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py deleted file mode 100644 index 59cb41d712..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .document import DocumentData, DocumentFailedInfo, DocumentObject, DocumentSuccessInfo - -__all__ = [ - "DocumentData", - "DocumentObject", - "DocumentSuccessInfo", - "DocumentFailedInfo", -] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py deleted file mode 100644 index 980bc6f4a7..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document.py +++ /dev/null @@ -1,51 +0,0 @@ -from typing import Optional - -from ....core import BaseModel - -__all__ =
["DocumentData", "DocumentObject", "DocumentSuccessInfo", "DocumentFailedInfo"] - - -class DocumentSuccessInfo(BaseModel): - documentId: Optional[str] = None - """文件id""" - filename: Optional[str] = None - """文件名称""" - - -class DocumentFailedInfo(BaseModel): - failReason: Optional[str] = None - """上传失败的原因,包括:文件格式不支持、文件大小超出限制、知识库容量已满、容量上限为 50 万字。""" - filename: Optional[str] = None - """文件名称""" - documentId: Optional[str] = None - """知识库id""" - - -class DocumentObject(BaseModel): - """文档信息""" - - successInfos: Optional[list[DocumentSuccessInfo]] = None - """上传成功的文件信息""" - failedInfos: Optional[list[DocumentFailedInfo]] = None - """上传失败的文件信息""" - - -class DocumentDataFailInfo(BaseModel): - """失败原因""" - - embedding_code: Optional[int] = ( - None # 失败码 10001:知识不可用,知识库空间已达上限 10002:知识不可用,知识库空间已达上限(字数超出限制) - ) - embedding_msg: Optional[str] = None # 失败原因 - - -class DocumentData(BaseModel): - id: str = None # 知识唯一id - custom_separator: list[str] = None # 切片规则 - sentence_size: str = None # 切片大小 - length: int = None # 文件大小(字节) - word_num: int = None # 文件字数 - name: str = None # 文件名 - url: str = None # 文件下载链接 - embedding_stat: int = None # 0:向量化中 1:向量化完成 2:向量化失败 - failInfo: Optional[DocumentDataFailInfo] = None # 失败原因 向量化失败embedding_stat=2的时候 会有此值 diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py deleted file mode 100644 index 509cb3a451..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_edit_params.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Optional, TypedDict - -__all__ = ["DocumentEditParams"] - - -class DocumentEditParams(TypedDict): - """ - 知识参数类型定义 - - Attributes: - id (str): 知识ID - knowledge_type (int): 知识类型: - 1:文章知识: 支持pdf,url,docx - 2.问答知识-文档: 支持pdf,url,docx - 3.问答知识-表格: 支持xlsx - 4.商品库-表格: 支持xlsx - 5.自定义: 支持pdf,url,docx - custom_separator (Optional[List[str]]): 当前知识类型为自定义(knowledge_type=5)时的切片规则,默认\n - sentence_size (Optional[int]): 当前知识类型为自定义(knowledge_type=5)时的切片字数,取值范围: 20-2000,默认300 - callback_url (Optional[str]): 回调地址 - callback_header (Optional[dict]): 回调时携带的header - """ - - id: str - knowledge_type: int - custom_separator: Optional[list[str]] - sentence_size: Optional[int] - callback_url: Optional[str] - callback_header: Optional[dict[str, str]] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py deleted file mode 100644 index 910c8c045e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_params.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import annotations - -from typing import Optional - -from typing_extensions import TypedDict - - -class DocumentListParams(TypedDict, total=False): - """ - 文件查询参数类型定义 - - Attributes: - purpose (Optional[str]): 文件用途 - knowledge_id (Optional[str]): 当文件用途为 retrieval 时,需要提供查询的知识库ID - page (Optional[int]): 页,默认1 - limit (Optional[int]): 查询文件列表数,默认10 - after (Optional[str]): 查询指定fileID之后的文件列表(当文件用途为 fine-tune 时需要) - order (Optional[str]): 排序规则,可选值['desc', 'asc'],默认desc(当文件用途为 fine-tune 时需要) - """ - - purpose: Optional[str] - knowledge_id: Optional[str] - page: Optional[int] - limit: Optional[int] - after: Optional[str] - order: Optional[str] diff --git 
a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py deleted file mode 100644 index acae4fad9f..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/document/document_list_resp.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import annotations - -from ....core import BaseModel -from . import DocumentData - -__all__ = ["DocumentPage"] - - -class DocumentPage(BaseModel): - list: list[DocumentData] - object: str diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py deleted file mode 100644 index bc6f159eb2..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = ["KnowledgeInfo"] - - -class KnowledgeInfo(BaseModel): - id: Optional[str] = None - """Unique knowledge base ID.""" - embedding_id: Optional[str] = ( - None # Embedding model bound to the knowledge base; see the model list in the [internal service API docs](https://lslfd0slxc.feishu.cn/docx/YauWdbBiMopV0FxB7KncPWCEn8f#H15NduiQZo3ugmxnWQFcfAHpnQ4) - ) - name: Optional[str] = None # Knowledge base name, limited to 100 characters - customer_identifier: Optional[str] = None # User identifier, up to 32 characters - description: Optional[str] = None # Knowledge base description, limited to 500 characters - background: Optional[str] = None # Background color (enum): 'blue', 'red', 'orange', 'purple', 'sky' - icon: Optional[str] = ( - None # Knowledge base icon (enum): question, book, seal, wrench, tag, horn, house # noqa: E501 - ) - bucket_id: Optional[str] = None # Bucket ID, up to 32 characters diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py deleted file mode 100644 index c3da201727..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_create_params.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import annotations - -from typing import Literal, Optional - -from typing_extensions import TypedDict - -__all__ = ["KnowledgeBaseParams"] - - -class KnowledgeBaseParams(TypedDict): - """ - Knowledge base parameter type definition. - - Attributes: - embedding_id (int): ID of the embedding model bound to the knowledge base - name (str): Knowledge base name, limited to 100 characters - customer_identifier (Optional[str]): User identifier, up to 32 characters - description (Optional[str]): Knowledge base description, limited to 500 characters - background (Optional[Literal['blue', 'red', 'orange', 'purple', 'sky']]): Background color - icon (Optional[Literal['question', 'book', 'seal', 'wrench', 'tag', 'horn', 'house']]): Knowledge base icon - bucket_id (Optional[str]): Bucket ID, up to 32 characters - """ - - embedding_id: int - name: str - customer_identifier: Optional[str] - description: Optional[str] - background: Optional[Literal["blue", "red", "orange", "purple", "sky"]] = None - icon: Optional[Literal["question", "book", "seal", "wrench", "tag", "horn", "house"]] = None - bucket_id: Optional[str] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py deleted file mode 100644 index a221b28e46..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import annotations - -from typing_extensions import
TypedDict - -__all__ = ["KnowledgeListParams"] - - -class KnowledgeListParams(TypedDict, total=False): - page: int = 1 - """ Page number, default 1 (the first page). - """ - - size: int = 10 - """Number of items per page, default 10. - """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py deleted file mode 100644 index e462eddc55..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_list_resp.py +++ /dev/null @@ -1,11 +0,0 @@ -from __future__ import annotations - -from ...core import BaseModel -from . import KnowledgeInfo - -__all__ = ["KnowledgePage"] - - -class KnowledgePage(BaseModel): - list: list[KnowledgeInfo] - object: str diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py deleted file mode 100644 index cfda709702..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/knowledge/knowledge_used.py +++ /dev/null @@ -1,21 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = ["KnowledgeStatistics", "KnowledgeUsed"] - - -class KnowledgeStatistics(BaseModel): - """ - Usage statistics. - """ - - word_num: Optional[int] = None - length: Optional[int] = None - - -class KnowledgeUsed(BaseModel): - used: Optional[KnowledgeStatistics] = None - """Amount used.""" - total: Optional[KnowledgeStatistics] = None - """Total knowledge base capacity.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py deleted file mode 100644 index c9bd60419c..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .sensitive_word_check import SensitiveWordCheckRequest - -__all__ = ["SensitiveWordCheckRequest"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py deleted file mode 100644 index 0c37d99e65..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/sensitive_word_check/sensitive_word_check.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -from typing_extensions import TypedDict - - -class SensitiveWordCheckRequest(TypedDict, total=False): - type: Optional[str] - """Sensitive word type; currently only ALL is supported.""" - status: Optional[str] - """Whether the sensitive word check is enabled: - enabled: ENABLE - disabled: DISABLE - Note: the check is enabled by default; disabling it requires contacting business support for the corresponding permission, otherwise DISABLE has no effect. - """ diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py deleted file mode 100644 index 62f77344ee..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .web_search import ( - SearchIntent, - SearchRecommend, - SearchResult, - WebSearch, -) -from .web_search_chunk import WebSearchChunk - -__all__ = ["WebSearch", "SearchIntent", "SearchResult", "SearchRecommend", "WebSearchChunk"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py
b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py deleted file mode 100644 index b3a3b26f07..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/tools_web_search_params.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import annotations - -from typing import Optional, Union - -from typing_extensions import TypedDict - -__all__ = ["WebSearchParams"] - - -class WebSearchParams(TypedDict): - """ - Parameter type definition for the web-search-pro tool. - - Attributes: - :param model: str, model name - :param request_id: Optional[str], request ID - :param stream: Optional[bool], whether to stream the response - :param messages: Union[str, List[str], List[int], object, None], - conversation context, passed as a JSON array such as {"role": "user", "content": "hello"}. - The current version only supports a single-turn User Message; the tool interprets the User Message and performs the search. - Pass the user's original question without instruction formatting where possible to improve search accuracy. - :param scope: Optional[str], search scope such as whole web or academic; defaults to whole web - :param location: Optional[str], the user's region, used to improve relevance - :param recent_days: Optional[int], restrict results to those updated in the last N days (1-30) - - - """ - - model: str - request_id: Optional[str] - stream: Optional[bool] - messages: Union[str, list[str], list[int], object, None] - scope: Optional[str] = None - location: Optional[str] = None - recent_days: Optional[int] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py deleted file mode 100644 index ac9fa3821e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search.py +++ /dev/null @@ -1,71 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = [ - "WebSearch", - "SearchIntent", - "SearchResult", - "SearchRecommend", -] - - -class SearchIntent(BaseModel): - index: int - # Search round, defaults to 0 - query: str - # Optimized search query - intent: str - # Detected intent type - keywords: str - # Search keywords - - -class SearchResult(BaseModel): - index: int - # Search round, defaults to 0 - title: str - # Title - link: str - # Link - content: str - # Content - icon: str - # Icon - media: str - # Source media - refer: str - # Reference marker, e.g. [ref_1] - - -class SearchRecommend(BaseModel): - index: int - # Search round, defaults to 0 - query: str - # Recommended query - - -class WebSearchMessageToolCall(BaseModel): - id: str - search_intent: Optional[SearchIntent] - search_result: Optional[SearchResult] - search_recommend: Optional[SearchRecommend] - type: str - - -class WebSearchMessage(BaseModel): - role: str - tool_calls: Optional[list[WebSearchMessageToolCall]] = None - - -class WebSearchChoice(BaseModel): - index: int - finish_reason: str - message: WebSearchMessage - - -class WebSearch(BaseModel): - created: Optional[int] = None - choices: list[WebSearchChoice] - request_id: Optional[str] = None - id: Optional[str] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py deleted file mode 100644 index 7fb0e02bb5..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/tools/web_search_chunk.py +++ /dev/null @@ -1,33 +0,0 @@ -from typing import Optional - -from ...core import BaseModel -from .web_search import SearchIntent, SearchRecommend, SearchResult - -__all__ = ["WebSearchChunk"] - - -class ChoiceDeltaToolCall(BaseModel): - index: int - id: Optional[str] = None - - search_intent: Optional[SearchIntent] = None - search_result: Optional[SearchResult] = None - search_recommend: Optional[SearchRecommend] = None - type:
Optional[str] = None - - -class ChoiceDelta(BaseModel): - role: Optional[str] = None - tool_calls: Optional[list[ChoiceDeltaToolCall]] = None - - -class Choice(BaseModel): - delta: ChoiceDelta - finish_reason: Optional[str] = None - index: int - - -class WebSearchChunk(BaseModel): - id: Optional[str] = None - choices: list[Choice] - created: Optional[int] = None diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py deleted file mode 100644 index b14072b1a7..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .video_object import VideoObject, VideoResult - -__all__ = ["VideoObject", "VideoResult"] diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py deleted file mode 100644 index f5489d708e..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_create_params.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import annotations - -from typing import Optional - -from typing_extensions import TypedDict - -__all__ = ["VideoCreateParams"] - -from ..sensitive_word_check import SensitiveWordCheckRequest - - -class VideoCreateParams(TypedDict, total=False): - model: str - """Model code.""" - prompt: str - """Text description of the desired video.""" - image_url: str - """URL or Base64 of an image; when an image is passed in, image-to-video generation is performed. - * Image format: - * Image size:""" - sensitive_word_check: Optional[SensitiveWordCheckRequest] - """Optional sensitive word check configuration.""" - request_id: str - """Passed by the client and must be unique; identifies each request. Generated by the platform if the client does not provide one.""" - - user_id: str - """Client-side user identifier.""" diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py deleted file mode 100644 index 85c3844d8a..0000000000 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/types/video/video_object.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Optional - -from ...core import BaseModel - -__all__ = ["VideoObject", "VideoResult"] - - -class VideoResult(BaseModel): - url: str - """Video URL.""" - cover_image_url: str - """Cover/preview image URL.""" - - -class VideoObject(BaseModel): - id: Optional[str] = None - """Task order number generated by the Zhipu AI open platform; use it when querying the task result.""" - - model: str - """Model name.""" - - video_result: list[VideoResult] - """Video generation results.""" - - task_status: str - """Processing status: PROCESSING, SUCCESS, or FAIL. - Note: while PROCESSING, the result must be fetched via the query endpoint.""" - - request_id: str - """Task number submitted by the client or generated by the platform.""" diff --git a/api/core/plugin/entities/plugin_daemon.py b/api/core/plugin/entities/plugin_daemon.py index c5dced121f..554353c6c9 100644 --- a/api/core/plugin/entities/plugin_daemon.py +++ b/api/core/plugin/entities/plugin_daemon.py @@ -1,8 +1,11 @@ +from datetime import datetime from enum import Enum from typing import Generic, Optional, TypeVar -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, Field +from core.model_runtime.entities.model_entities import AIModelEntity +from core.model_runtime.entities.provider_entities import ProviderEntity from core.tools.entities.tool_entities import ToolProviderEntityWithPlugin T = TypeVar("T", bound=(BaseModel | dict | list | bool)) @@ -22,6 +25,7 @@ class InstallPluginMessage(BaseModel): """ Message for installing a plugin.
""" + class Event(Enum): Info = "info" Done = "done" @@ -42,4 +46,44 @@ class PluginBasicBooleanResponse(BaseModel): """ Basic boolean response from plugin daemon. """ - result: bool \ No newline at end of file + + result: bool + + +class PluginModelSchemaEntity(BaseModel): + model_schema: AIModelEntity = Field(description="The model schema.") + + # pydantic configs + model_config = ConfigDict(protected_namespaces=()) + + +class PluginModelProviderEntity(BaseModel): + id: str = Field(alias="ID", description="ID") + created_at: datetime = Field(alias="CreatedAt", description="The created at time of the model provider.") + updated_at: datetime = Field(alias="UpdatedAt", description="The updated at time of the model provider.") + provider: str = Field(description="The provider of the model.") + tenant_id: str = Field(description="The tenant ID.") + plugin_unique_identifier: str = Field(description="The plugin unique identifier.") + plugin_id: str = Field(description="The plugin ID.") + declaration: ProviderEntity = Field(description="The declaration of the model provider.") + + +class PluginNumTokensResponse(BaseModel): + """ + Response for number of tokens. + """ + + num_tokens: int = Field(description="The number of tokens.") + + +class PluginStringResultResponse(BaseModel): + result: str = Field(description="The result of the string.") + + +class PluginVoiceEntity(BaseModel): + name: str = Field(description="The name of the voice.") + value: str = Field(description="The value of the voice.") + + +class PluginVoicesResponse(BaseModel): + voices: list[PluginVoiceEntity] = Field(description="The result of the voices.") diff --git a/api/core/plugin/entities/request.py b/api/core/plugin/entities/request.py index 5fe9ea1dda..af40ebc5ca 100644 --- a/api/core/plugin/entities/request.py +++ b/api/core/plugin/entities/request.py @@ -1,7 +1,7 @@ from collections.abc import Mapping from typing import Any, Literal, Optional -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from core.entities.provider_entities import BasicProviderConfig from core.model_runtime.entities.message_entities import ( @@ -39,6 +39,8 @@ class BaseRequestInvokeModel(BaseModel): model: str model_type: ModelType + model_config = ConfigDict(protected_namespaces=()) + class RequestInvokeLLM(BaseRequestInvokeModel): """ @@ -53,6 +55,8 @@ class RequestInvokeLLM(BaseRequestInvokeModel): stop: Optional[list[str]] = Field(default_factory=list) stream: Optional[bool] = False + model_config = ConfigDict(protected_namespaces=()) + @field_validator("prompt_messages", mode="before") @classmethod def convert_prompt_messages(cls, v): diff --git a/api/core/plugin/manager/asset.py b/api/core/plugin/manager/asset.py index df76f56a6d..fc4a99ad49 100644 --- a/api/core/plugin/manager/asset.py +++ b/api/core/plugin/manager/asset.py @@ -2,11 +2,11 @@ from core.plugin.manager.base import BasePluginManager class PluginAssetManager(BasePluginManager): - def fetch_asset(self, id: str) -> bytes: + def fetch_asset(self, tenant_id: str, id: str) -> bytes: """ Fetch an asset by id. 
""" - response = self._request(method="GET", path=f"/assets/plugin/{id}") + response = self._request(method="GET", path=f"plugin/{tenant_id}/assets/{id}") if response.status_code != 200: raise ValueError(f"can not found asset {id}") return response.content diff --git a/api/core/plugin/manager/base.py b/api/core/plugin/manager/base.py index fd18b3798e..1e050427d4 100644 --- a/api/core/plugin/manager/base.py +++ b/api/core/plugin/manager/base.py @@ -132,8 +132,19 @@ class BasePluginManager: line_data = json.loads(line) rep = PluginDaemonBasicResponse[type](**line_data) if rep.code != 0: - raise ValueError(f"got error from plugin daemon: {rep.message}, code: {rep.code}") + raise PluginDaemonRespError(rep.message, rep.code) if rep.data is None: raise ValueError("got empty data from plugin daemon") yield rep.data - \ No newline at end of file + + +class PluginDaemonRespError(Exception): + """ + Plugin daemon response error. + """ + + def __init__(self, resp_message: str, code: int): + super().__init__() + self.message = f"got error from plugin daemon: {resp_message}, code: {code}" + self.resp_message = resp_message + self.code = code diff --git a/api/core/plugin/manager/model.py b/api/core/plugin/manager/model.py index 4411d76fe1..e34e9b516e 100644 --- a/api/core/plugin/manager/model.py +++ b/api/core/plugin/manager/model.py @@ -1,13 +1,523 @@ -from core.model_runtime.entities.provider_entities import ProviderEntity -from core.plugin.manager.base import BasePluginManager +import binascii +from collections.abc import Generator, Sequence +from typing import IO, Optional + +from core.model_runtime.entities.llm_entities import LLMResultChunk +from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool +from core.model_runtime.entities.model_entities import AIModelEntity +from core.model_runtime.entities.rerank_entities import RerankResult +from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult +from core.model_runtime.utils.encoders import jsonable_encoder +from core.plugin.entities.plugin_daemon import ( + PluginBasicBooleanResponse, + PluginModelProviderEntity, + PluginModelSchemaEntity, + PluginNumTokensResponse, + PluginStringResultResponse, + PluginVoicesResponse, +) +from core.plugin.manager.base import BasePluginManager, PluginDaemonRespError class PluginModelManager(BasePluginManager): - def fetch_model_providers(self, tenant_id: str) -> list[ProviderEntity]: + def fetch_model_providers(self, tenant_id: str) -> Sequence[PluginModelProviderEntity]: """ Fetch model providers for the given tenant. 
""" response = self._request_with_plugin_daemon_response( - "GET", f"plugin/{tenant_id}/models", list[ProviderEntity], params={"page": 1, "page_size": 256} + "GET", + f"plugin/{tenant_id}/management/models", + list[PluginModelProviderEntity], + params={"page": 1, "page_size": 256}, ) return response + + def get_model_schema( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + ) -> AIModelEntity | None: + """ + Get model schema + """ + response = self._request_with_plugin_daemon_response_stream( + "POST", + f"plugin/{tenant_id}/dispatch/model/schema", + PluginModelSchemaEntity, + data={ + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + }, + }, + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.model_schema + + return None + + def validate_provider_credentials( + self, tenant_id: str, user_id: str, plugin_id: str, provider: str, credentials: dict + ) -> bool: + """ + validate the credentials of the provider + """ + response = self._request_with_plugin_daemon_response_stream( + "POST", + f"plugin/{tenant_id}/dispatch/model/validate_provider_credentials", + PluginBasicBooleanResponse, + data={ + "user_id": user_id, + "data": { + "provider": provider, + "credentials": credentials, + }, + }, + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.result + + return False + + def validate_model_credentials( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + ) -> bool: + """ + validate the credentials of the provider + """ + response = self._request_with_plugin_daemon_response_stream( + "POST", + f"plugin/{tenant_id}/dispatch/model/validate_model_credentials", + PluginBasicBooleanResponse, + data={ + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + }, + }, + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.result + + return False + + def invoke_llm( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model: str, + credentials: dict, + prompt_messages: list[PromptMessage], + model_parameters: Optional[dict] = None, + tools: Optional[list[PromptMessageTool]] = None, + stop: Optional[list[str]] = None, + stream: bool = True, + ) -> Generator[LLMResultChunk, None, None]: + """ + Invoke llm + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/llm/invoke", + type=LLMResultChunk, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": "llm", + "model": model, + "credentials": credentials, + "prompt_messages": prompt_messages, + "model_parameters": model_parameters, + "tools": tools, + "stop": stop, + "stream": stream, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + try: + yield from response + except PluginDaemonRespError as e: + raise ValueError(e.resp_message + str(e.code)) + + def get_llm_num_tokens( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + prompt_messages: 
list[PromptMessage], + tools: Optional[list[PromptMessageTool]] = None, + ) -> int: + """ + Get number of tokens for llm + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/llm/num_tokens", + type=PluginNumTokensResponse, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "prompt_messages": prompt_messages, + "tools": tools, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.num_tokens + + return 0 + + def invoke_text_embedding( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model: str, + credentials: dict, + texts: list[str], + ) -> TextEmbeddingResult: + """ + Invoke text embedding + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/text_embedding/invoke", + type=TextEmbeddingResult, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": "text-embedding", + "model": model, + "credentials": credentials, + "texts": texts, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp + + raise ValueError("Failed to invoke text embedding") + + def get_text_embedding_num_tokens( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + texts: list[str], + ) -> int: + """ + Get number of tokens for text embedding + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/text_embedding/num_tokens", + type=PluginNumTokensResponse, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "texts": texts, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.num_tokens + + return 0 + + def invoke_rerank( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + query: str, + docs: list[str], + score_threshold: Optional[float] = None, + top_n: Optional[int] = None, + ) -> RerankResult: + """ + Invoke rerank + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/rerank/invoke", + type=RerankResult, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "query": query, + "docs": docs, + "score_threshold": score_threshold, + "top_n": top_n, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp + + raise ValueError("Failed to invoke rerank") + + def invoke_tts( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + content_text: str, + voice: str, + ) -> Generator[bytes, None, None]: + """ + Invoke tts + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/tts/invoke", + type=PluginStringResultResponse, 
+ data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "content_text": content_text, + "voice": voice, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + try: + for result in response: + hex_str = result.result + yield binascii.unhexlify(hex_str) + except PluginDaemonRespError as e: + raise ValueError(f"{e.resp_message}, code: {e.code}") + + def get_tts_model_voices( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + language: Optional[str] = None, + ) -> list[dict]: + """ + Get tts model voices + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/model/voices", + type=PluginVoicesResponse, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "language": language, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return [{"name": voice.name, "value": voice.value} for voice in resp.voices] + + return [] + + def invoke_speech_to_text( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + file: IO[bytes], + ) -> str: + """ + Invoke speech to text + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/speech2text/invoke", + type=PluginStringResultResponse, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "file": binascii.hexlify(file.read()).decode(), + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.result + + raise ValueError("Failed to invoke speech to text") + + def invoke_moderation( + self, + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + model_type: str, + model: str, + credentials: dict, + text: str, + ) -> bool: + """ + Invoke moderation + """ + response = self._request_with_plugin_daemon_response_stream( + method="POST", + path=f"plugin/{tenant_id}/dispatch/moderation/invoke", + type=PluginBasicBooleanResponse, + data=jsonable_encoder( + { + "user_id": user_id, + "data": { + "provider": provider, + "model_type": model_type, + "model": model, + "credentials": credentials, + "text": text, + }, + } + ), + headers={ + "X-Plugin-ID": plugin_id, + "Content-Type": "application/json", + }, + ) + + for resp in response: + return resp.result + + raise ValueError("Failed to invoke moderation") diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 3a1fe300df..cb49a6cf56 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -22,7 +22,7 @@ from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderC from core.helper.position_helper import is_filtered from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.provider_entities import CredentialFormSchema, FormType, ProviderEntity -from core.model_runtime.model_providers import model_provider_factory +from core.model_runtime.model_providers.model_provider_factory import
ModelProviderFactory from extensions import ext_hosting_provider from extensions.ext_database import db from extensions.ext_redis import redis_client @@ -97,6 +97,7 @@ class ProviderManager: provider_name_to_provider_model_records_dict = self._get_all_provider_models(tenant_id) # Get all provider entities + model_provider_factory = ModelProviderFactory(tenant_id) provider_entities = model_provider_factory.get_providers() # Get All preferred provider types of the workspace @@ -204,12 +205,10 @@ class ProviderManager: if not provider_configuration: raise ValueError(f"Provider {provider} does not exist.") - provider_instance = provider_configuration.get_provider_instance() - model_type_instance = provider_instance.get_model_instance(model_type) + model_type_instance = provider_configuration.get_model_type_instance(model_type) return ProviderModelBundle( configuration=provider_configuration, - provider_instance=provider_instance, model_type_instance=model_type_instance, ) @@ -257,8 +256,8 @@ class ProviderManager: if not default_model: return None - provider_instance = model_provider_factory.get_provider_instance(default_model.provider_name) - provider_schema = provider_instance.get_provider_schema() + model_provider_factory = ModelProviderFactory(tenant_id) + provider_schema = model_provider_factory.get_provider_schema(provider=default_model.provider_name) return DefaultModelEntity( model=default_model.model_name, diff --git a/api/core/tools/utils/model_invocation_utils.py b/api/core/tools/utils/model_invocation_utils.py index 4e226810d6..b3c3292f5d 100644 --- a/api/core/tools/utils/model_invocation_utils.py +++ b/api/core/tools/utils/model_invocation_utils.py @@ -10,7 +10,7 @@ from typing import cast from core.model_manager import ModelManager from core.model_runtime.entities.llm_entities import LLMResult from core.model_runtime.entities.message_entities import PromptMessage -from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, InvokeBadRequestError, @@ -18,7 +18,7 @@ from core.model_runtime.errors.invoke import ( InvokeRateLimitError, InvokeServerUnavailableError, ) -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel, ModelPropertyKey +from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder from extensions.ext_database import db from models.tools import ToolModelInvoke diff --git a/api/services/model_load_balancing_service.py b/api/services/model_load_balancing_service.py index e7b9422cfe..5c5c0c1607 100644 --- a/api/services/model_load_balancing_service.py +++ b/api/services/model_load_balancing_service.py @@ -14,7 +14,7 @@ from core.model_runtime.entities.provider_entities import ( ModelCredentialSchema, ProviderCredentialSchema, ) -from core.model_runtime.model_providers import model_provider_factory +from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory from core.provider_manager import ProviderManager from extensions.ext_database import db from models.provider import LoadBalancingModelConfig @@ -527,6 +527,7 @@ class ModelLoadBalancingService: credentials[key] = encrypter.decrypt_token(tenant_id, original_credentials[key]) if validate: + model_provider_factory = ModelProviderFactory(tenant_id) if isinstance(credential_schemas, 
ModelCredentialSchema): credentials = model_provider_factory.model_credentials_validate( provider=provider_configuration.provider.provider, diff --git a/api/services/model_provider_service.py b/api/services/model_provider_service.py index 384a072b37..630d575f82 100644 --- a/api/services/model_provider_service.py +++ b/api/services/model_provider_service.py @@ -1,16 +1,12 @@ import logging -import mimetypes import os -from pathlib import Path -from typing import Optional, cast +from typing import Optional import requests -from flask import current_app from core.entities.model_entities import ModelStatus, ProviderModelWithStatusEntity from core.model_runtime.entities.model_entities import ModelType, ParameterRule -from core.model_runtime.model_providers import model_provider_factory -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel +from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory from core.provider_manager import ProviderManager from models.provider import ProviderType from services.entities.model_provider_entities import ( @@ -100,7 +96,7 @@ class ModelProviderService: ModelWithProviderEntityResponse(model) for model in provider_configurations.get_models(provider=provider) ] - def get_provider_credentials(self, tenant_id: str, provider: str) -> dict: + def get_provider_credentials(self, tenant_id: str, provider: str) -> Optional[dict]: """ get provider credentials. @@ -176,7 +172,7 @@ class ModelProviderService: # Remove custom provider credentials. provider_configuration.delete_custom_credentials() - def get_model_credentials(self, tenant_id: str, provider: str, model_type: str, model: str) -> dict: + def get_model_credentials(self, tenant_id: str, provider: str, model_type: str, model: str) -> Optional[dict]: """ get model credentials. @@ -351,18 +347,17 @@ class ModelProviderService: if not provider_configuration: raise ValueError(f"Provider {provider} does not exist.") - # Get model instance of LLM - model_type_instance = provider_configuration.get_model_type_instance(ModelType.LLM) - model_type_instance = cast(LargeLanguageModel, model_type_instance) - # fetch credentials credentials = provider_configuration.get_current_credentials(model_type=ModelType.LLM, model=model) if not credentials: return [] - # Call get_parameter_rules method of model instance to get model parameter rules - return model_type_instance.get_parameter_rules(model=model, credentials=credentials) + model_schema = provider_configuration.get_model_schema( + model_type=ModelType.LLM, model=model, credentials=credentials + ) + + return model_schema.parameter_rules if model_schema else [] def get_default_model_of_model_type(self, tenant_id: str, model_type: str) -> Optional[DefaultModelResponse]: """ @@ -410,52 +405,21 @@ class ModelProviderService: ) def get_model_provider_icon( - self, provider: str, icon_type: str, lang: str + self, tenant_id: str, provider: str, icon_type: str, lang: str ) -> tuple[Optional[bytes], Optional[str]]: """ get model provider icon. 
+ :param tenant_id: workspace id :param provider: provider name :param icon_type: icon type (icon_small or icon_large) :param lang: language (zh_Hans or en_US) :return: """ - provider_instance = model_provider_factory.get_provider_instance(provider) - provider_schema = provider_instance.get_provider_schema() - - if icon_type.lower() == "icon_small": - if not provider_schema.icon_small: - raise ValueError(f"Provider {provider} does not have small icon.") - - if lang.lower() == "zh_hans": - file_name = provider_schema.icon_small.zh_Hans - else: - file_name = provider_schema.icon_small.en_US - else: - if not provider_schema.icon_large: - raise ValueError(f"Provider {provider} does not have large icon.") + model_provider_factory = ModelProviderFactory(tenant_id) + byte_data = model_provider_factory.get_provider_icon(provider, icon_type, lang) - if lang.lower() == "zh_hans": - file_name = provider_schema.icon_large.zh_Hans - else: - file_name = provider_schema.icon_large.en_US - - root_path = current_app.root_path - provider_instance_path = os.path.dirname( - os.path.join(root_path, provider_instance.__class__.__module__.replace(".", "/")) - ) - file_path = os.path.join(provider_instance_path, "_assets") - file_path = os.path.join(file_path, file_name) - - if not os.path.exists(file_path): - return None, None - - mimetype, _ = mimetypes.guess_type(file_path) - mimetype = mimetype or "application/octet-stream" - - # read binary from file - byte_data = Path(file_path).read_bytes() - return byte_data, mimetype + return byte_data, "application/octet-stream" def switch_preferred_provider(self, tenant_id: str, provider: str, preferred_provider_type: str) -> None: """ @@ -525,6 +489,9 @@ class ModelProviderService: def free_quota_submit(self, tenant_id: str, provider: str): api_key = os.environ.get("FREE_QUOTA_APPLY_API_KEY") api_base_url = os.environ.get("FREE_QUOTA_APPLY_BASE_URL") + if not api_base_url: + raise Exception("FREE_QUOTA_APPLY_BASE_URL is not set") + api_url = api_base_url + "/api/v1/providers/apply" headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"} @@ -546,6 +513,9 @@ class ModelProviderService: def free_quota_qualification_verify(self, tenant_id: str, provider: str, token: Optional[str]): api_key = os.environ.get("FREE_QUOTA_APPLY_API_KEY") api_base_url = os.environ.get("FREE_QUOTA_APPLY_BASE_URL") + if not api_base_url: + raise Exception("FREE_QUOTA_APPLY_BASE_URL is not set") + api_url = api_base_url + "/api/v1/providers/qualification-verify" headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"} diff --git a/api/tests/integration_tests/model_runtime/test_model_provider_factory.py b/api/tests/integration_tests/model_runtime/test_model_provider_factory.py index 0ec4b0b724..5cb8a6252a 100644 --- a/api/tests/integration_tests/model_runtime/test_model_provider_factory.py +++ b/api/tests/integration_tests/model_runtime/test_model_provider_factory.py @@ -9,7 +9,7 @@ logger = logging.getLogger(__name__) def test_get_providers(): - factory = ModelProviderFactory() + factory = ModelProviderFactory("test_tenant") providers = factory.get_providers() for provider in providers: @@ -20,7 +20,7 @@ def test_get_providers(): def test_get_models(): - factory = ModelProviderFactory() + factory = ModelProviderFactory("test_tenant") providers = factory.get_models( model_type=ModelType.LLM, provider_configs=[ @@ -51,19 +51,7 @@ def test_get_models(): def test_provider_credentials_validate(): - factory = ModelProviderFactory() + factory = 
ModelProviderFactory("test_tenant") factory.provider_credentials_validate( provider="openai", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")} ) - - -def test__get_model_provider_map(): - factory = ModelProviderFactory() - model_providers = factory._get_model_provider_map() - - for name, model_provider in model_providers.items(): - logger.debug(name) - logger.debug(model_provider.provider_instance) - - assert len(model_providers) >= 1 - assert isinstance(model_providers["openai"], ModelProviderExtension) diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index dfb43650d2..aff6e1c481 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -115,7 +115,6 @@ def test_execute_llm(setup_openai_mock): custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)), model_settings=[], ), - provider_instance=provider_instance, model_type_instance=model_type_instance, ) model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model="gpt-3.5-turbo") @@ -203,7 +202,6 @@ def test_execute_llm_with_jinja2(setup_code_executor_mock, setup_openai_mock): custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)), model_settings=[], ), - provider_instance=provider_instance, model_type_instance=model_type_instance, ) diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py index 88435c4022..90c0d54e17 100644 --- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py +++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py @@ -35,23 +35,27 @@ def get_mocked_fetch_model_config( mode: str, credentials: dict, ): - provider_instance = ModelProviderFactory().get_provider_instance(provider) - model_type_instance = provider_instance.get_model_instance(ModelType.LLM) + model_provider_factory = ModelProviderFactory(tenant_id="test_tenant") + model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM) provider_model_bundle = ProviderModelBundle( configuration=ProviderConfiguration( tenant_id="1", - provider=provider_instance.get_provider_schema(), + provider=model_provider_factory.get_provider_schema(provider), preferred_provider_type=ProviderType.CUSTOM, using_provider_type=ProviderType.CUSTOM, system_configuration=SystemConfiguration(enabled=False), custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)), model_settings=[], ), - provider_instance=provider_instance, model_type_instance=model_type_instance, ) model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model) - model_schema = model_type_instance.get_model_schema(model) + model_schema = model_provider_factory.get_model_schema( + provider=provider, + model_type=model_type_instance.model_type, + model=model, + credentials=credentials, + ) assert model_schema is not None model_config = ModelConfigWithCredentialsEntity( model=model, diff --git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py index 2f4214a580..44284e03d0 100644 --- a/api/tests/unit_tests/core/test_provider_manager.py +++ b/api/tests/unit_tests/core/test_provider_manager.py @@ -1,12 +1,13 @@ from core.entities.provider_entities import 
diff --git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py
index 2f4214a580..44284e03d0 100644
--- a/api/tests/unit_tests/core/test_provider_manager.py
+++ b/api/tests/unit_tests/core/test_provider_manager.py
@@ -1,12 +1,13 @@
 from core.entities.provider_entities import ModelSettings
 from core.model_runtime.entities.model_entities import ModelType
-from core.model_runtime.model_providers import model_provider_factory
+from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
 from core.provider_manager import ProviderManager
 from models.provider import LoadBalancingModelConfig, ProviderModelSetting
 
 
 def test__to_model_settings(mocker):
     # Get all provider entities
+    model_provider_factory = ModelProviderFactory("test_tenant")
     provider_entities = model_provider_factory.get_providers()
 
     provider_entity = None
@@ -71,6 +72,7 @@ def test__to_model_settings(mocker):
 
 def test__to_model_settings_only_one_lb(mocker):
     # Get all provider entities
+    model_provider_factory = ModelProviderFactory("test_tenant")
     provider_entities = model_provider_factory.get_providers()
 
     provider_entity = None
@@ -123,6 +125,7 @@ def test__to_model_settings_only_one_lb(mocker):
 
 def test__to_model_settings_lb_disabled(mocker):
     # Get all provider entities
+    model_provider_factory = ModelProviderFactory("test_tenant")
     provider_entities = model_provider_factory.get_providers()
 
     provider_entity = None
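Finally, the provider-manager unit tests show the migration recipe for former users of the module-level singleton: build a factory per test, then pick out the provider entity under test. A sketch of that selection step; the next() filter is an assumed but typical way to do it, not code from the patch:

from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory

model_provider_factory = ModelProviderFactory("test_tenant")
provider_entities = model_provider_factory.get_providers()

# Pick the entity for one provider; returns None if it is not registered.
provider_entity = next(
    (entity for entity in provider_entities if entity.provider == "openai"),
    None,
)
assert provider_entity is not None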