Merge commit d6b9648332
@@ -0,0 +1,44 @@
import os
from collections.abc import Callable

import pytest

from _pytest.monkeypatch import MonkeyPatch

from core.plugin.manager.model import PluginModelManager
from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass


def mock_plugin_daemon(
    monkeypatch: MonkeyPatch,
) -> Callable[[], None]:
    """
    Mock the plugin daemon's model manager.

    :param monkeypatch: pytest monkeypatch fixture
    :return: unpatch function
    """

    def unpatch() -> None:
        monkeypatch.undo()

    monkeypatch.setattr(PluginModelManager, "invoke_llm", MockModelClass.invoke_llm)
    monkeypatch.setattr(PluginModelManager, "fetch_model_providers", MockModelClass.fetch_model_providers)
    monkeypatch.setattr(PluginModelManager, "get_model_schema", MockModelClass.get_model_schema)

    return unpatch


MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true"


@pytest.fixture
def setup_model_mock(monkeypatch):
    if MOCK:
        unpatch = mock_plugin_daemon(monkeypatch)

    yield

    if MOCK:
        unpatch()
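The fixture above only swaps in the mock when MOCK_SWITCH=true, so the same suite can also run against a real plugin daemon. A minimal sketch of a test that relies on it follows; the test body, the no-argument PluginModelManager() construction, and the argument values are illustrative assumptions, not part of this diff:

def test_invoke_llm_is_mocked(setup_model_mock):
    # With MOCK_SWITCH=true, invoke_llm is MockModelClass.invoke_llm and
    # streams canned chunks instead of calling the daemon.
    manager = PluginModelManager()  # assumed constructible without arguments
    chunks = list(
        manager.invoke_llm(
            tenant_id="test_tenant",
            user_id="test_user",
            plugin_id="langgenius/openai",
            provider="openai",
            model="gpt-3.5-turbo",
            credentials={},
            prompt_messages=[],
        )
    )
    assert chunks  # one chunk per character plus a final tool-call chunk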
@@ -0,0 +1,249 @@
import datetime
import uuid
from collections.abc import Generator, Sequence
from decimal import Decimal
from json import dumps
from typing import Optional

from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage, PromptMessageTool
from core.model_runtime.entities.model_entities import (
    AIModelEntity,
    FetchFrom,
    ModelFeature,
    ModelPropertyKey,
    ModelType,
)
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.plugin.manager.model import PluginModelManager


class MockModelClass(PluginModelManager):
    def fetch_model_providers(self, tenant_id: str) -> Sequence[PluginModelProviderEntity]:
        """
        Fetch model providers for the given tenant.
        """
        return [
            PluginModelProviderEntity(
                id=uuid.uuid4().hex,
                created_at=datetime.datetime.now(),
                updated_at=datetime.datetime.now(),
                provider="openai",
                tenant_id=tenant_id,
                plugin_unique_identifier="langgenius/openai/openai",
                plugin_id="langgenius/openai",
                declaration=ProviderEntity(
                    provider="openai",
                    label=I18nObject(
                        en_US="OpenAI",
                        zh_Hans="OpenAI",
                    ),
                    description=I18nObject(
                        en_US="OpenAI",
                        zh_Hans="OpenAI",
                    ),
                    icon_small=I18nObject(
                        en_US="https://example.com/icon_small.png",
                        zh_Hans="https://example.com/icon_small.png",
                    ),
                    icon_large=I18nObject(
                        en_US="https://example.com/icon_large.png",
                        zh_Hans="https://example.com/icon_large.png",
                    ),
                    supported_model_types=[ModelType.LLM],
                    configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL],
                    models=[
                        AIModelEntity(
                            model="gpt-3.5-turbo",
                            label=I18nObject(
                                en_US="gpt-3.5-turbo",
                                zh_Hans="gpt-3.5-turbo",
                            ),
                            model_type=ModelType.LLM,
                            fetch_from=FetchFrom.PREDEFINED_MODEL,
                            model_properties={},
                            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL],
                        ),
                        AIModelEntity(
                            model="gpt-3.5-turbo-instruct",
                            label=I18nObject(
                                en_US="gpt-3.5-turbo-instruct",
                                zh_Hans="gpt-3.5-turbo-instruct",
                            ),
                            model_type=ModelType.LLM,
                            fetch_from=FetchFrom.PREDEFINED_MODEL,
                            model_properties={
                                ModelPropertyKey.MODE: LLMMode.COMPLETION,
                            },
                            features=[],
                        ),
                    ],
                ),
            )
        ]

    def get_model_schema(
        self,
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        model_type: str,
        model: str,
        credentials: dict,
    ) -> AIModelEntity | None:
        """
        Get model schema
        """
        return AIModelEntity(
            model=model,
            label=I18nObject(
                en_US="OpenAI",
                zh_Hans="OpenAI",
            ),
            model_type=ModelType(model_type),
            fetch_from=FetchFrom.PREDEFINED_MODEL,
            model_properties={},
            features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL] if model == "gpt-3.5-turbo" else [],
        )

    @staticmethod
    def generate_function_call(
        tools: Optional[list[PromptMessageTool]],
    ) -> Optional[AssistantPromptMessage.ToolCall]:
        # Build a deterministic tool call from the first tool's schema so that
        # tool-calling code paths can be exercised without a real model.
        if not tools:
            return None
        function: PromptMessageTool = tools[0]
        function_name = function.name
        function_parameters = function.parameters
        function_parameters_type = function_parameters["type"]
        if function_parameters_type != "object":
            return None
        function_parameters_properties = function_parameters["properties"]
        function_parameters_required = function_parameters["required"]
        parameters = {}
        for parameter_name, parameter in function_parameters_properties.items():
            if parameter_name not in function_parameters_required:
                continue
            parameter_type = parameter["type"]
            if parameter_type == "string":
                if "enum" in parameter:
                    if len(parameter["enum"]) == 0:
                        continue
                    parameters[parameter_name] = parameter["enum"][0]
                else:
                    parameters[parameter_name] = "kawaii"
            elif parameter_type == "integer":
                parameters[parameter_name] = 114514
            elif parameter_type == "number":
                parameters[parameter_name] = 1919810.0
            elif parameter_type == "boolean":
                parameters[parameter_name] = True

        return AssistantPromptMessage.ToolCall(
            id=str(uuid.uuid4()),
            type="function",
            function=AssistantPromptMessage.ToolCall.ToolCallFunction(
                name=function_name,
                arguments=dumps(parameters),
            ),
        )

    @staticmethod
    def mocked_chat_create_sync(
        model: str,
        prompt_messages: list[PromptMessage],
        tools: Optional[list[PromptMessageTool]] = None,
    ) -> LLMResult:
        tool_call = MockModelClass.generate_function_call(tools=tools)

        return LLMResult(
            id=str(uuid.uuid4()),
            model=model,
            prompt_messages=prompt_messages,
            message=AssistantPromptMessage(content="elaina", tool_calls=[tool_call] if tool_call else []),
            usage=LLMUsage(
                prompt_tokens=2,
                completion_tokens=1,
                total_tokens=3,
                prompt_unit_price=Decimal(0.0001),
                completion_unit_price=Decimal(0.0002),
                prompt_price_unit=Decimal(1),
                prompt_price=Decimal(0.0001),
                completion_price_unit=Decimal(1),
                completion_price=Decimal(0.0002),
                total_price=Decimal(0.0003),
                currency="USD",
                latency=0.001,
            ),
        )

    @staticmethod
    def mocked_chat_create_stream(
        model: str,
        prompt_messages: list[PromptMessage],
        tools: Optional[list[PromptMessageTool]] = None,
    ) -> Generator[LLMResultChunk, None, None]:
        tool_call = MockModelClass.generate_function_call(tools=tools)

        full_text = "Hello, world!\n\n```python\nprint('Hello, world!')\n```"
        # Emit the text one character per chunk; the final chunk carries the
        # tool call (if any) with an empty content delta.
        for i in range(len(full_text) + 1):
            if i == len(full_text):
                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=AssistantPromptMessage(
                            content="",
                            tool_calls=[tool_call] if tool_call else [],
                        ),
                    ),
                )
            else:
                yield LLMResultChunk(
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=0,
                        message=AssistantPromptMessage(
                            content=full_text[i],
                            tool_calls=[tool_call] if tool_call else [],
                        ),
                        usage=LLMUsage(
                            prompt_tokens=2,
                            completion_tokens=17,
                            total_tokens=19,
                            prompt_unit_price=Decimal(0.0001),
                            completion_unit_price=Decimal(0.0002),
                            prompt_price_unit=Decimal(1),
                            prompt_price=Decimal(0.0001),
                            completion_price_unit=Decimal(1),
                            completion_price=Decimal(0.0002),
                            total_price=Decimal(0.0003),
                            currency="USD",
                            latency=0.001,
                        ),
                    ),
                )

    def invoke_llm(
        self: PluginModelManager,
        *,
        tenant_id: str,
        user_id: str,
        plugin_id: str,
        provider: str,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: Optional[dict] = None,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[list[str]] = None,
        stream: bool = True,
    ):
        # Honor the stream flag so the otherwise-unreachable sync mock is used
        # for non-streaming invocations.
        if stream:
            return MockModelClass.mocked_chat_create_stream(model=model, prompt_messages=prompt_messages, tools=tools)
        return MockModelClass.mocked_chat_create_sync(model=model, prompt_messages=prompt_messages, tools=tools)
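Because the mocked stream yields one character per chunk and finishes with an empty-content chunk, the full text can be reassembled by concatenating the deltas. A small usage sketch (the argument values are illustrative):

stream = MockModelClass.mocked_chat_create_stream(
    model="gpt-3.5-turbo",
    prompt_messages=[],
)
text = "".join(chunk.delta.message.content for chunk in stream)
assert text == "Hello, world!\n\n```python\nprint('Hello, world!')\n```"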
@@ -1,55 +0,0 @@
import os
from pathlib import Path

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gpustack.speech2text.speech2text import GPUStackSpeech2TextModel


def test_validate_credentials():
    model = GPUStackSpeech2TextModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model="faster-whisper-medium",
            credentials={
                "endpoint_url": "invalid_url",
                "api_key": "invalid_api_key",
            },
        )

    model.validate_credentials(
        model="faster-whisper-medium",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
    )


def test_invoke_model():
    model = GPUStackSpeech2TextModel()

    # Get the directory of the current file
    current_dir = os.path.dirname(os.path.abspath(__file__))

    # Get assets directory
    assets_dir = os.path.join(os.path.dirname(current_dir), "assets")

    # Construct the path to the audio file
    audio_file_path = os.path.join(assets_dir, "audio.mp3")

    file = Path(audio_file_path).read_bytes()

    result = model.invoke(
        model="faster-whisper-medium",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
        file=file,
    )

    assert isinstance(result, str)
    assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
@@ -1,24 +0,0 @@
import os

from core.model_runtime.model_providers.gpustack.tts.tts import GPUStackText2SpeechModel


def test_invoke_model():
    model = GPUStackText2SpeechModel()

    result = model.invoke(
        model="cosyvoice-300m-sft",
        tenant_id="test",
        credentials={
            "endpoint_url": os.environ.get("GPUSTACK_SERVER_URL"),
            "api_key": os.environ.get("GPUSTACK_API_KEY"),
        },
        content_text="Hello world",
        voice="Chinese Female",
    )

    content = b""
    for chunk in result:
        content += chunk

    assert content != b""
@@ -0,0 +1,50 @@
from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from models.provider import ProviderType


def get_mocked_fetch_model_config(
    provider: str,
    model: str,
    mode: str,
    credentials: dict,
):
    model_provider_factory = ModelProviderFactory(tenant_id="test_tenant")
    model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id="1",
            provider=model_provider_factory.get_provider_schema(provider),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(provider=CustomProviderConfiguration(credentials=credentials)),
            model_settings=[],
        ),
        model_type_instance=model_type_instance,
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model=model)
    model_schema = model_provider_factory.get_model_schema(
        provider=provider,
        model_type=model_type_instance.model_type,
        model=model,
        credentials=credentials,
    )
    assert model_schema is not None
    model_config = ModelConfigWithCredentialsEntity(
        model=model,
        provider=provider,
        mode=mode,
        credentials=credentials,
        parameters={},
        model_schema=model_schema,
        provider_model_bundle=provider_model_bundle,
    )

    return MagicMock(return_value=(model_instance, model_config))
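The helper returns a MagicMock whose return_value is the prepared (model_instance, model_config) pair, so whatever code path it is patched into receives both objects no matter what arguments it passes. A quick sketch with illustrative values:

mocked_fetch = get_mocked_fetch_model_config(
    provider="openai",
    model="gpt-3.5-turbo",
    mode="chat",
    credentials={"openai_api_key": "fake_key"},
)
model_instance, model_config = mocked_fetch()  # a MagicMock accepts and ignores call arguments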
@@ -1,52 +1,52 @@
from unittest.mock import MagicMock

from core.app.app_config.entities import ModelConfigEntity
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
from core.model_runtime.entities.provider_entities import ProviderEntity
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform


def test__calculate_rest_token():
    model_schema_mock = MagicMock(spec=AIModelEntity)
    parameter_rule_mock = MagicMock(spec=ParameterRule)
    parameter_rule_mock.name = "max_tokens"
    model_schema_mock.parameter_rules = [parameter_rule_mock]
    model_schema_mock.model_properties = {ModelPropertyKey.CONTEXT_SIZE: 62}

    large_language_model_mock = MagicMock(spec=LargeLanguageModel)
    large_language_model_mock.get_num_tokens.return_value = 6

    provider_mock = MagicMock(spec=ProviderEntity)
    provider_mock.provider = "openai"

    provider_configuration_mock = MagicMock(spec=ProviderConfiguration)
    provider_configuration_mock.provider = provider_mock
    provider_configuration_mock.model_settings = None

    provider_model_bundle_mock = MagicMock(spec=ProviderModelBundle)
    provider_model_bundle_mock.model_type_instance = large_language_model_mock
    provider_model_bundle_mock.configuration = provider_configuration_mock

    model_config_mock = MagicMock(spec=ModelConfigEntity)
    model_config_mock.model = "gpt-4"
    model_config_mock.credentials = {}
    model_config_mock.parameters = {"max_tokens": 50}
    model_config_mock.model_schema = model_schema_mock
    model_config_mock.provider_model_bundle = provider_model_bundle_mock

    prompt_transform = PromptTransform()

    prompt_messages = [UserPromptMessage(content="Hello, how are you?")]
    rest_tokens = prompt_transform._calculate_rest_token(prompt_messages, model_config_mock)

    # Validate based on the mock configuration and expected logic:
    # context size (62) - max_tokens (50) - prompt tokens (6) = 6
    expected_rest_tokens = (
        model_schema_mock.model_properties[ModelPropertyKey.CONTEXT_SIZE]
        - model_config_mock.parameters["max_tokens"]
        - large_language_model_mock.get_num_tokens.return_value
    )
    assert rest_tokens == expected_rest_tokens
    assert rest_tokens == 6
@@ -1,186 +1,190 @@
from core.entities.provider_entities import ModelSettings
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory
from core.provider_manager import ProviderManager
from models.provider import LoadBalancingModelConfig, ProviderModelSetting


def test__to_model_settings(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )

    provider_manager = ProviderManager()

    # Running the method
    result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 2
    assert result[0].load_balancing_configs[0].name == "__inherit__"
    assert result[0].load_balancing_configs[1].name == "first"


def test__to_model_settings_only_one_lb(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=True,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        )
    ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )

    provider_manager = ProviderManager()

    # Running the method
    result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 0


def test__to_model_settings_lb_disabled(mocker):
    # Get all provider entities
    model_provider_factory = ModelProviderFactory("test_tenant")
    provider_entities = model_provider_factory.get_providers()

    provider_entity = None
    for provider in provider_entities:
        if provider.provider == "openai":
            provider_entity = provider

    # Mocking the inputs
    provider_model_settings = [
        ProviderModelSetting(
            id="id",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            enabled=True,
            load_balancing_enabled=False,
        )
    ]
    load_balancing_model_configs = [
        LoadBalancingModelConfig(
            id="id1",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="__inherit__",
            encrypted_config=None,
            enabled=True,
        ),
        LoadBalancingModelConfig(
            id="id2",
            tenant_id="tenant_id",
            provider_name="openai",
            model_name="gpt-4",
            model_type="text-generation",
            name="first",
            encrypted_config='{"openai_api_key": "fake_key"}',
            enabled=True,
        ),
    ]

    mocker.patch(
        "core.helper.model_provider_cache.ProviderCredentialsCache.get", return_value={"openai_api_key": "fake_key"}
    )

    provider_manager = ProviderManager()

    # Running the method
    result = provider_manager._to_model_settings(provider_entity, provider_model_settings, load_balancing_model_configs)

    # Asserting that the result is as expected
    assert len(result) == 1
    assert isinstance(result[0], ModelSettings)
    assert result[0].model == "gpt-4"
    assert result[0].model_type == ModelType.LLM
    assert result[0].enabled is True
    assert len(result[0].load_balancing_configs) == 0
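Taken together, the three tests pin down the conversion rule: load-balancing configs are attached only when the setting has load_balancing_enabled=True and more than one config exists; a single config or a disabled flag yields an empty list. A compact restatement of the behavior the assertions imply (a paraphrase, not the actual _to_model_settings implementation):

def effective_lb_configs(load_balancing_enabled: bool, configs: list) -> list:
    # Mirrors what the three test cases above assert about _to_model_settings.
    if load_balancing_enabled and len(configs) > 1:
        return configs
    return []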
@@ -0,0 +1,186 @@
<svg width="90" height="10" viewBox="0 0 90 10" fill="none" xmlns="http://www.w3.org/2000/svg">
  <g id="Anthropic" clip-path="url(#clip0_5981_49007)">
    <g id="Clip path group">
      <mask id="mask0_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
        <g id="__lottie_element_2">
          <path id="Vector" d="M89.375 -0.00195312H0V9.99805H89.375V-0.00195312Z" fill="white"/>
        </g>
      </mask>
      <g mask="url(#mask0_5981_49007)">
        <g id="Group">
          <g id="Clip path group_2">
            <mask id="mask1_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
              <g id="__lottie_element_4">
                <path id="Vector_2" d="M0 -0.00390625H89.375V9.99609H0V-0.00390625Z" fill="white"/>
              </g>
            </mask>
            <g mask="url(#mask1_5981_49007)">
              <g id="Group_2">
                <g id="Clip path group_3">
                  <mask id="mask2_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                    <g id="__lottie_element_12">
                      <path id="Vector_3" d="M0 -0.00585938H89.375V9.99414H0V-0.00585938Z" fill="white"/>
                    </g>
                  </mask>
                  <g mask="url(#mask2_5981_49007)">
                    <g id="Group_3">
                      <g id="Clip path group_4">
                        <mask id="mask3_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_89">
                            <path id="Vector_4" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask3_5981_49007)">
                          <g id="Group_4">
                            <g id="Group_5">
                              <g id="Group_6">
                                <path id="Vector_5" d="M18.1273 6.92438L13.7773 0.15625H11.4297V9.82501H13.4321V3.05688L17.7821 9.82501H20.1297V0.15625H18.1273V6.92438Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_5">
                        <mask id="mask4_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_80">
                            <path id="Vector_6" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask4_5981_49007)">
                          <g id="Group_7">
                            <g id="Group_8">
                              <g id="Group_9">
                                <path id="Vector_7" d="M21.7969 2.02094H25.0423V9.82501H27.1139V2.02094H30.3594V0.15625H21.7969V2.02094Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_6">
                        <mask id="mask5_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_71">
                            <path id="Vector_8" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask5_5981_49007)">
                          <g id="Group_10">
                            <g id="Group_11">
                              <g id="Group_12">
                                <path id="Vector_9" d="M38.6442 4.00994H34.0871V0.15625H32.0156V9.82501H34.0871V5.87463H38.6442V9.82501H40.7156V0.15625H38.6442V4.00994Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_7">
                        <mask id="mask6_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_62">
                            <path id="Vector_10" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask6_5981_49007)">
                          <g id="Group_13">
                            <g id="Group_14">
                              <g id="Group_15">
                                <path id="Vector_11" d="M45.3376 2.02094H47.893C48.9152 2.02094 49.4539 2.39387 49.4539 3.09831C49.4539 3.80275 48.9152 4.17569 47.893 4.17569H45.3376V2.02094ZM51.5259 3.09831C51.5259 1.27506 50.186 0.15625 47.9897 0.15625H43.2656V9.82501H45.3376V6.04037H47.6443L49.7164 9.82501H52.0094L49.715 5.75211C50.8666 5.30941 51.5259 4.37721 51.5259 3.09831Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_8">
                        <mask id="mask7_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_53">
                            <path id="Vector_12" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask7_5981_49007)">
                          <g id="Group_16">
                            <g id="Group_17">
                              <g id="Group_18">
                                <path id="Vector_13" d="M57.8732 8.05653C56.2438 8.05653 55.2496 6.89631 55.2496 5.00404C55.2496 3.08416 56.2438 1.92394 57.8732 1.92394C59.4887 1.92394 60.4691 3.08416 60.4691 5.00404C60.4691 6.89631 59.4887 8.05653 57.8732 8.05653ZM57.8732 -0.00976562C55.0839 -0.00976562 53.1094 2.06206 53.1094 5.00404C53.1094 7.91841 55.0839 9.99023 57.8732 9.99023C60.6486 9.99023 62.6094 7.91841 62.6094 5.00404C62.6094 2.06206 60.6486 -0.00976562 57.8732 -0.00976562Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_9">
                        <mask id="mask8_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_44">
                            <path id="Vector_14" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask8_5981_49007)">
                          <g id="Group_19">
                            <g id="Group_20">
                              <g id="Group_21">
                                <path id="Vector_15" d="M69.1794 4.45194H66.6233V2.02094H69.1794C70.2019 2.02094 70.7407 2.43532 70.7407 3.23644C70.7407 4.03756 70.2019 4.45194 69.1794 4.45194ZM69.2762 0.15625H64.5508V9.82501H66.6233V6.31662H69.2762C71.473 6.31662 72.8133 5.15637 72.8133 3.23644C72.8133 1.3165 71.473 0.15625 69.2762 0.15625Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_10">
                        <mask id="mask9_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_35">
                            <path id="Vector_16" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask9_5981_49007)">
                          <g id="Group_22">
                            <g id="Group_23">
                              <g id="Group_24">
                                <path id="Vector_17" d="M86.8413 6.57863C86.4823 7.51786 85.7642 8.05653 84.7837 8.05653C83.1542 8.05653 82.16 6.89631 82.16 5.00404C82.16 3.08416 83.1542 1.92394 84.7837 1.92394C85.7642 1.92394 86.4823 2.46261 86.8413 3.40183H89.0369C88.4984 1.33002 86.8827 -0.00976562 84.7837 -0.00976562C81.9942 -0.00976562 80.0195 2.06206 80.0195 5.00404C80.0195 7.91841 81.9942 9.99023 84.7837 9.99023C86.8965 9.99023 88.5122 8.63664 89.0508 6.57863H86.8413Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_11">
                        <mask id="mask10_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_26">
                            <path id="Vector_18" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask10_5981_49007)">
                          <g id="Group_25">
                            <g id="Group_26">
                              <g id="Group_27">
                                <path id="Vector_19" d="M73.6484 0.15625L77.5033 9.82501H79.6172L75.7624 0.15625H73.6484Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_12">
                        <mask id="mask11_5981_49007" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_17">
                            <path id="Vector_20" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask11_5981_49007)">
                          <g id="Group_28">
                            <g id="Group_29">
                              <g id="Group_30">
                                <path id="Vector_21" d="M3.64038 5.99893L4.95938 2.60106L6.27838 5.99893H3.64038ZM3.85422 0.15625L0 9.82501H2.15505L2.9433 7.79456H6.97558L7.76371 9.82501H9.91875L6.06453 0.15625H3.85422Z" fill="black" fill-opacity="0.95"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                    </g>
                  </g>
                </g>
              </g>
            </g>
          </g>
        </g>
      </g>
    </g>
  </g>
  <defs>
    <clipPath id="clip0_5981_49007">
      <rect width="89.375" height="10" fill="white"/>
    </clipPath>
  </defs>
</svg>
@@ -0,0 +1,186 @@
<svg width="90" height="10" viewBox="0 0 90 10" fill="none" xmlns="http://www.w3.org/2000/svg">
  <g id="Anthropic" clip-path="url(#clip0_5981_52010)">
    <g id="Clip path group">
      <mask id="mask0_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
        <g id="__lottie_element_2">
          <path id="Vector" d="M89.375 -0.00195312H0V9.99805H89.375V-0.00195312Z" fill="white"/>
        </g>
      </mask>
      <g mask="url(#mask0_5981_52010)">
        <g id="Group">
          <g id="Clip path group_2">
            <mask id="mask1_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
              <g id="__lottie_element_4">
                <path id="Vector_2" d="M0 -0.00390625H89.375V9.99609H0V-0.00390625Z" fill="white"/>
              </g>
            </mask>
            <g mask="url(#mask1_5981_52010)">
              <g id="Group_2">
                <g id="Clip path group_3">
                  <mask id="mask2_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                    <g id="__lottie_element_12">
                      <path id="Vector_3" d="M0 -0.00585938H89.375V9.99414H0V-0.00585938Z" fill="white"/>
                    </g>
                  </mask>
                  <g mask="url(#mask2_5981_52010)">
                    <g id="Group_3">
                      <g id="Clip path group_4">
                        <mask id="mask3_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_89">
                            <path id="Vector_4" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask3_5981_52010)">
                          <g id="Group_4">
                            <g id="Group_5">
                              <g id="Group_6">
                                <path id="Vector_5" d="M18.1273 6.92438L13.7773 0.15625H11.4297V9.82501H13.4321V3.05688L17.7821 9.82501H20.1297V0.15625H18.1273V6.92438Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_5">
                        <mask id="mask4_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_80">
                            <path id="Vector_6" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask4_5981_52010)">
                          <g id="Group_7">
                            <g id="Group_8">
                              <g id="Group_9">
                                <path id="Vector_7" d="M21.7969 2.02094H25.0423V9.82501H27.1139V2.02094H30.3594V0.15625H21.7969V2.02094Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_6">
                        <mask id="mask5_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_71">
                            <path id="Vector_8" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask5_5981_52010)">
                          <g id="Group_10">
                            <g id="Group_11">
                              <g id="Group_12">
                                <path id="Vector_9" d="M38.6442 4.00994H34.0871V0.15625H32.0156V9.82501H34.0871V5.87463H38.6442V9.82501H40.7156V0.15625H38.6442V4.00994Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_7">
                        <mask id="mask6_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_62">
                            <path id="Vector_10" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask6_5981_52010)">
                          <g id="Group_13">
                            <g id="Group_14">
                              <g id="Group_15">
                                <path id="Vector_11" d="M45.3376 2.02094H47.893C48.9152 2.02094 49.4539 2.39387 49.4539 3.09831C49.4539 3.80275 48.9152 4.17569 47.893 4.17569H45.3376V2.02094ZM51.5259 3.09831C51.5259 1.27506 50.186 0.15625 47.9897 0.15625H43.2656V9.82501H45.3376V6.04037H47.6443L49.7164 9.82501H52.0094L49.715 5.75211C50.8666 5.30941 51.5259 4.37721 51.5259 3.09831Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_8">
                        <mask id="mask7_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_53">
                            <path id="Vector_12" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask7_5981_52010)">
                          <g id="Group_16">
                            <g id="Group_17">
                              <g id="Group_18">
                                <path id="Vector_13" d="M57.8732 8.05653C56.2438 8.05653 55.2496 6.89631 55.2496 5.00404C55.2496 3.08416 56.2438 1.92394 57.8732 1.92394C59.4887 1.92394 60.4691 3.08416 60.4691 5.00404C60.4691 6.89631 59.4887 8.05653 57.8732 8.05653ZM57.8732 -0.00976562C55.0839 -0.00976562 53.1094 2.06206 53.1094 5.00404C53.1094 7.91841 55.0839 9.99023 57.8732 9.99023C60.6486 9.99023 62.6094 7.91841 62.6094 5.00404C62.6094 2.06206 60.6486 -0.00976562 57.8732 -0.00976562Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_9">
                        <mask id="mask8_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_44">
                            <path id="Vector_14" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask8_5981_52010)">
                          <g id="Group_19">
                            <g id="Group_20">
                              <g id="Group_21">
                                <path id="Vector_15" d="M69.1794 4.45194H66.6233V2.02094H69.1794C70.2019 2.02094 70.7407 2.43532 70.7407 3.23644C70.7407 4.03756 70.2019 4.45194 69.1794 4.45194ZM69.2762 0.15625H64.5508V9.82501H66.6233V6.31662H69.2762C71.473 6.31662 72.8133 5.15637 72.8133 3.23644C72.8133 1.3165 71.473 0.15625 69.2762 0.15625Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_10">
                        <mask id="mask9_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_35">
                            <path id="Vector_16" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask9_5981_52010)">
                          <g id="Group_22">
                            <g id="Group_23">
                              <g id="Group_24">
                                <path id="Vector_17" d="M86.8413 6.57863C86.4823 7.51786 85.7642 8.05653 84.7837 8.05653C83.1542 8.05653 82.16 6.89631 82.16 5.00404C82.16 3.08416 83.1542 1.92394 84.7837 1.92394C85.7642 1.92394 86.4823 2.46261 86.8413 3.40183H89.0369C88.4984 1.33002 86.8827 -0.00976562 84.7837 -0.00976562C81.9942 -0.00976562 80.0195 2.06206 80.0195 5.00404C80.0195 7.91841 81.9942 9.99023 84.7837 9.99023C86.8965 9.99023 88.5122 8.63664 89.0508 6.57863H86.8413Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_11">
                        <mask id="mask10_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_26">
                            <path id="Vector_18" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask10_5981_52010)">
                          <g id="Group_25">
                            <g id="Group_26">
                              <g id="Group_27">
                                <path id="Vector_19" d="M73.6484 0.15625L77.5033 9.82501H79.6172L75.7624 0.15625H73.6484Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                      <g id="Clip path group_12">
                        <mask id="mask11_5981_52010" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="-1" width="90" height="11">
                          <g id="__lottie_element_17">
                            <path id="Vector_20" d="M0 -0.0078125H89.375V9.99219H0V-0.0078125Z" fill="white"/>
                          </g>
                        </mask>
                        <g mask="url(#mask11_5981_52010)">
                          <g id="Group_28">
                            <g id="Group_29">
                              <g id="Group_30">
                                <path id="Vector_21" d="M3.64038 5.99893L4.95938 2.60106L6.27838 5.99893H3.64038ZM3.85422 0.15625L0 9.82501H2.15505L2.9433 7.79456H6.97558L7.76371 9.82501H9.91875L6.06453 0.15625H3.85422Z" fill="white" fill-opacity="0.8"/>
                              </g>
                            </g>
                          </g>
                        </g>
                      </g>
                    </g>
                  </g>
                </g>
              </g>
            </g>
          </g>
        </g>
      </g>
    </g>
  </g>
  <defs>
    <clipPath id="clip0_5981_52010">
      <rect width="89.375" height="10" fill="white"/>
    </clipPath>
  </defs>
</svg>
@@ -0,0 +1,16 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './AnthropicDark.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

Icon.displayName = 'AnthropicDark'

export default Icon
@@ -0,0 +1,16 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './AnthropicLight.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

Icon.displayName = 'AnthropicLight'

export default Icon