Feat: Q&A format segmentation support (#668)
Co-authored-by: jyong <718720800@qq.com>
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>

parent aae2fb8a30
commit cf93d8d6e2
@@ -0,0 +1,123 @@
import numpy as np
import sklearn.decomposition
import pickle
import time


# Apply 'Algorithm 1' to the ada-002 embeddings to make them isotropic, taken from the paper:
# ALL-BUT-THE-TOP: SIMPLE AND EFFECTIVE POST-PROCESSING FOR WORD REPRESENTATIONS
# Jiaqi Mu, Pramod Viswanath

# This uses Principal Component Analysis (PCA) to 'evenly distribute' the embedding vectors (make them isotropic)
# For more information on PCA, see https://jamesmccaffrey.wordpress.com/2021/07/16/computing-pca-using-numpy-without-scikit/


# get the file pointer of the pickle containing the embeddings
fp = open('/path/to/your/data/Embedding-Latest.pkl', 'rb')

# the embedding data here is a dict consisting of key / value pairs
# the key is the hash of the message (SHA3-256), the value is the embedding from ada-002 (array of dimension 1536)
# the hash can be used to look up the original text in a database
E = pickle.load(fp)  # load the data into memory

# separate the keys (hashes) and values (embeddings) into separate vectors
K = list(E.keys())  # vector of all the hash values
X = np.array(list(E.values()))  # vector of all the embeddings, converted to numpy arrays

# list the total number of embeddings
# this can be truncated if there are too many embeddings to do PCA on
print(f"Total number of embeddings: {len(X)}")

# get dimension of embeddings, used later
Dim = len(X[0])

# print the first few embeddings
print("First two embeddings are: ")
print(X[0])
print(f"First embedding length: {len(X[0])}")
print(X[1])
print(f"Second embedding length: {len(X[1])}")

# compute the mean of all the embeddings, and print the result
mu = np.mean(X, axis=0)  # same as mu in paper
print(f"Mean embedding vector: {mu}")
print(f"Mean embedding vector length: {len(mu)}")

# subtract the mean vector from each embedding vector ... vectorized in numpy
X_tilde = X - mu  # same as v_tilde(w) in paper

# do the heavy lifting of extracting the principal components
# note that this is a function of the embeddings you currently have here, and this set may grow over time
# therefore the PCA basis vectors may change over time, and your final isotropic embeddings may drift over time
# but the drift should stabilize after you have extracted enough embedding data to characterize the nature of the embedding engine
print("Performing PCA on the normalized embeddings ...")
pca = sklearn.decomposition.PCA()  # new object
TICK = time.time()  # start timer
pca.fit(X_tilde)  # do the heavy lifting!
TOCK = time.time()  # end timer
DELTA = TOCK - TICK

print(f"PCA finished in {DELTA} seconds ...")

# dimensional reduction stage (the only hyperparameter)
# pick max dimension of PCA components to express embeddings
# in general this is some integer less than or equal to the dimension of your embeddings
# it could be set as a high percentile, say 95th percentile of pca.explained_variance_ratio_
# but just hardcoding a constant here
D = 15  # hyperparameter on dimension (out of 1536 for ada-002), paper recommends D = Dim/100
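
# A possible alternative to hardcoding D, sketched from the percentile idea above
# (one reading: keep the components whose explained variance ratio exceeds the 95th
# percentile of all ratios, i.e. roughly the top 5% of directions; left commented out):
# threshold = np.percentile(pca.explained_variance_ratio_, 95)
# D = int(np.sum(pca.explained_variance_ratio_ > threshold))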

# form the set of v_prime(w), which is the final embedding
# this could be vectorized in numpy to speed it up, but coding it directly here in a double for-loop to avoid errors and to be transparent
E_prime = dict()  # output dict of the new embeddings
N = len(X_tilde)
N10 = max(1, round(N/10))  # progress-report interval (guarded so the modulo below never divides by zero)
U = pca.components_  # set of PCA basis vectors, sorted from most significant to least significant
print(f"Shape of full set of PCA components {U.shape}")
U = U[0:D, :]  # take the top D dimensions (or take them all if D is the size of the embedding vector)
print(f"Shape of downselected PCA components {U.shape}")
for ii in range(N):
    v_tilde = X_tilde[ii]
    v = X[ii]
    v_projection = np.zeros(Dim)  # start to build the projection
    # project the original embedding onto the PCA basis vectors, use only first D dimensions
    for jj in range(D):
        u_jj = U[jj, :]  # vector
        v_jj = np.dot(u_jj, v)  # scalar
        v_projection += v_jj * u_jj  # vector
    v_prime = v_tilde - v_projection  # final embedding vector
    v_prime = v_prime / np.linalg.norm(v_prime)  # create unit vector
    E_prime[K[ii]] = v_prime

    if (ii % N10 == 0) or (ii == N - 1):
        print(f"Finished with {ii+1} embeddings out of {N} ({round(100*ii/N)}% done)")


# save as new pickle
print("Saving new pickle ...")
embeddingName = '/path/to/your/data/Embedding-Latest-Isotropic.pkl'
with open(embeddingName, 'wb') as f:  # Python 3: open(..., 'wb')
    pickle.dump([E_prime, mu, U], f)
print(embeddingName)

print("Done!")

# When working with live data with a new embedding from ada-002, be sure to transform it first with this function before comparing it
#
def projectEmbedding(v, mu, U):
    v = np.array(v)
    v_tilde = v - mu
    v_projection = np.zeros(len(v))  # start to build the projection
    # project the original embedding onto the PCA basis vectors, use only first D dimensions
    for u in U:
        v_jj = np.dot(u, v)  # scalar
        v_projection += v_jj * u  # vector
    v_prime = v_tilde - v_projection  # final embedding vector
    v_prime = v_prime / np.linalg.norm(v_prime)  # create unit vector
    return v_prime
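
A minimal usage sketch for the saved artifacts (assumptions: the pickle written above exists; `fetch_ada002_embedding` is a hypothetical stand-in for a call to the ada-002 API):

# load the isotropic embeddings together with the saved mu and U
with open('/path/to/your/data/Embedding-Latest-Isotropic.pkl', 'rb') as f:
    E_prime, mu, U = pickle.load(f)

# transform a fresh ada-002 embedding before comparing it against the values in E_prime
v_new = fetch_ada002_embedding("some new message")  # hypothetical helper returning a 1536-dim vector
v_new_iso = projectEmbedding(v_new, mu, U)  # unit vector, directly comparable to E_prime entries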
@@ -0,0 +1,102 @@
from flask import current_app
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import BaseTool

from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.index.vector_index.vector_index import VectorIndex
from core.llm.llm_builder import LLMBuilder
from models.dataset import Dataset, DocumentSegment


class DatasetTool(BaseTool):
    """Tool for querying a Dataset."""

    dataset: Dataset
    k: int = 2

    def _run(self, tool_input: str) -> str:
        if self.dataset.indexing_technique == "economy":
            # use keyword table query
            kw_table_index = KeywordTableIndex(
                dataset=self.dataset,
                config=KeywordTableConfig(
                    max_keywords_per_chunk=5
                )
            )

            documents = kw_table_index.search(tool_input, search_kwargs={'k': self.k})
            return str("\n".join([document.page_content for document in documents]))
        else:
            model_credentials = LLMBuilder.get_model_credentials(
                tenant_id=self.dataset.tenant_id,
                model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
                model_name='text-embedding-ada-002'
            )

            embeddings = CacheEmbedding(OpenAIEmbeddings(
                **model_credentials
            ))

            vector_index = VectorIndex(
                dataset=self.dataset,
                config=current_app.config,
                embeddings=embeddings
            )

            documents = vector_index.search(
                tool_input,
                search_type='similarity',
                search_kwargs={
                    'k': self.k
                }
            )

            hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id)
            hit_callback.on_tool_end(documents)
            document_context_list = []
            index_node_ids = [document.metadata['doc_id'] for document in documents]
            segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None),
                                                    DocumentSegment.status == 'completed',
                                                    DocumentSegment.enabled == True,
                                                    DocumentSegment.index_node_id.in_(index_node_ids)
                                                    ).all()

            if segments:
                for segment in segments:
                    if segment.answer:
                        document_context_list.append(segment.answer)
                    else:
                        document_context_list.append(segment.content)

            return str("\n".join(document_context_list))

    async def _arun(self, tool_input: str) -> str:
        model_credentials = LLMBuilder.get_model_credentials(
            tenant_id=self.dataset.tenant_id,
            model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
            model_name='text-embedding-ada-002'
        )

        embeddings = CacheEmbedding(OpenAIEmbeddings(
            **model_credentials
        ))

        vector_index = VectorIndex(
            dataset=self.dataset,
            config=current_app.config,
            embeddings=embeddings
        )

        documents = await vector_index.asearch(
            tool_input,
            search_type='similarity',
            search_kwargs={
                'k': 10
            }
        )

        hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id)
        hit_callback.on_tool_end(documents)
        return str("\n".join([document.page_content for document in documents]))
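
A minimal usage sketch (assumptions: `dataset` is a loaded Dataset row, the call runs inside a Flask app context since `_run` reads `current_app.config`, and the name/description strings are illustrative only):

tool = DatasetTool(name='dataset-tool', description='Query the dataset', dataset=dataset, k=2)
context = tool._run('How do I reset my password?')
# for qa_model segments the joined context holds the stored answers; otherwise the raw segment content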
@@ -0,0 +1,42 @@
"""add_qa_model_support

Revision ID: 8d2d099ceb74
Revises: a5b56fb053ef
Create Date: 2023-07-18 15:25:15.293438

"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = '8d2d099ceb74'
down_revision = '7ce5a52e4eee'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('document_segments', schema=None) as batch_op:
        batch_op.add_column(sa.Column('answer', sa.Text(), nullable=True))
        batch_op.add_column(sa.Column('updated_by', postgresql.UUID(), nullable=True))
        batch_op.add_column(sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False))

    with op.batch_alter_table('documents', schema=None) as batch_op:
        batch_op.add_column(sa.Column('doc_form', sa.String(length=255), server_default=sa.text("'text_model'::character varying"), nullable=False))

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('documents', schema=None) as batch_op:
        batch_op.drop_column('doc_form')

    with op.batch_alter_table('document_segments', schema=None) as batch_op:
        batch_op.drop_column('updated_at')
        batch_op.drop_column('updated_by')
        batch_op.drop_column('answer')

    # ### end Alembic commands ###
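
For reference, a sketch of how these new columns would look on the model side (assumed shape only; the real definitions live in models/dataset.py):

# class DocumentSegment(db.Model):
#     ...
#     answer = db.Column(db.Text, nullable=True)
#     updated_by = db.Column(postgresql.UUID, nullable=True)
#     updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))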
@@ -0,0 +1,102 @@
import datetime
import logging
import time
from typing import Optional, List

import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound

from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment


@shared_task
def create_segment_to_index_task(segment_id: str, keywords: Optional[List[str]] = None):
    """
    Async create segment to index
    :param segment_id:
    :param keywords:
    Usage: create_segment_to_index_task.delay(segment_id)
    """
    logging.info(click.style('Start create segment to index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()

    segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
    if not segment:
        raise NotFound('Segment not found')

    if segment.status != 'waiting':
        return

    indexing_cache_key = 'segment_{}_indexing'.format(segment.id)

    try:
        # update segment status to indexing
        update_params = {
            DocumentSegment.status: "indexing",
            DocumentSegment.indexing_at: datetime.datetime.utcnow()
        }
        DocumentSegment.query.filter_by(id=segment.id).update(update_params)
        db.session.commit()
        document = Document(
            page_content=segment.content,
            metadata={
                "doc_id": segment.index_node_id,
                "doc_hash": segment.index_node_hash,
                "document_id": segment.document_id,
                "dataset_id": segment.dataset_id,
            }
        )

        dataset = segment.dataset

        if not dataset:
            logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
            return

        dataset_document = segment.document

        if not dataset_document:
            logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
            return

        if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
            logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
            return

        # save vector index
        index = IndexBuilder.get_index(dataset, 'high_quality')
        if index:
            index.add_texts([document], duplicate_check=True)

        # save keyword index
        index = IndexBuilder.get_index(dataset, 'economy')
        if index:
            if keywords and len(keywords) > 0:
                index.create_segment_keywords(segment.index_node_id, keywords)
            else:
                index.add_texts([document])

        # update segment to completed
        update_params = {
            DocumentSegment.status: "completed",
            DocumentSegment.completed_at: datetime.datetime.utcnow()
        }
        DocumentSegment.query.filter_by(id=segment.id).update(update_params)
        db.session.commit()

        end_at = time.perf_counter()
        logging.info(click.style('Segment created to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("create segment to index failed")
        segment.enabled = False
        segment.disabled_at = datetime.datetime.utcnow()
        segment.status = 'error'
        segment.error = str(e)
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
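
A minimal invocation sketch (assumptions: a Celery worker is running and the segment is still in 'waiting' status; the keyword list is illustrative):

create_segment_to_index_task.delay(segment.id, keywords=['pricing', 'refund'])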
@@ -0,0 +1,24 @@
import logging
import time

import click
import requests
from celery import shared_task

from core.generator.llm_generator import LLMGenerator


@shared_task
def generate_test_task():
    logging.info(click.style('Start generate test', fg='green'))
    start_at = time.perf_counter()

    try:
        # res = requests.post('https://api.openai.com/v1/chat/completions')
        answer = LLMGenerator.generate_conversation_name('84b2202c-c359-46b7-a810-bce50feaa4d1', 'avb', 'ccc')
        print(f'answer: {answer}')

        end_at = time.perf_counter()
        logging.info(click.style('Conversation test, latency: {}'.format(end_at - start_at), fg='green'))
    except Exception:
        logging.exception("generate test failed")
@@ -0,0 +1,114 @@
import datetime
import logging
import time
from typing import List, Optional

import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound

from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment


@shared_task
def update_segment_index_task(segment_id: str, keywords: Optional[List[str]] = None):
    """
    Async update segment index
    :param segment_id:
    :param keywords:
    Usage: update_segment_index_task.delay(segment_id)
    """
    logging.info(click.style('Start update segment index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()

    segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
    if not segment:
        raise NotFound('Segment not found')

    if segment.status != 'updating':
        return

    indexing_cache_key = 'segment_{}_indexing'.format(segment.id)

    try:
        dataset = segment.dataset

        if not dataset:
            logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
            return

        dataset_document = segment.document

        if not dataset_document:
            logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
            return

        if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
            logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
            return

        # update segment status to indexing
        update_params = {
            DocumentSegment.status: "indexing",
            DocumentSegment.indexing_at: datetime.datetime.utcnow()
        }
        DocumentSegment.query.filter_by(id=segment.id).update(update_params)
        db.session.commit()

        vector_index = IndexBuilder.get_index(dataset, 'high_quality')
        kw_index = IndexBuilder.get_index(dataset, 'economy')

        # delete from vector index
        if vector_index:
            vector_index.delete_by_ids([segment.index_node_id])

        # delete from keyword index
        kw_index.delete_by_ids([segment.index_node_id])

        # add new index
        document = Document(
            page_content=segment.content,
            metadata={
                "doc_id": segment.index_node_id,
                "doc_hash": segment.index_node_hash,
                "document_id": segment.document_id,
                "dataset_id": segment.dataset_id,
            }
        )

        # save vector index
        index = IndexBuilder.get_index(dataset, 'high_quality')
        if index:
            index.add_texts([document], duplicate_check=True)

        # save keyword index
        index = IndexBuilder.get_index(dataset, 'economy')
        if index:
            if keywords and len(keywords) > 0:
                index.create_segment_keywords(segment.index_node_id, keywords)
            else:
                index.add_texts([document])

        # update segment to completed
        update_params = {
            DocumentSegment.status: "completed",
            DocumentSegment.completed_at: datetime.datetime.utcnow()
        }
        DocumentSegment.query.filter_by(id=segment.id).update(update_params)
        db.session.commit()

        end_at = time.perf_counter()
        logging.info(click.style('Segment update index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("update segment index failed")
        segment.enabled = False
        segment.disabled_at = datetime.datetime.utcnow()
        segment.status = 'error'
        segment.error = str(e)
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
@@ -0,0 +1,81 @@
import datetime
import logging
import time
from typing import List, Optional

import click
from celery import shared_task
from langchain.schema import Document
from werkzeug.exceptions import NotFound

from core.index.index import IndexBuilder
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment


@shared_task
def update_segment_keyword_index_task(segment_id: str):
    """
    Async update segment keyword index
    :param segment_id:
    Usage: update_segment_keyword_index_task.delay(segment_id)
    """
    logging.info(click.style('Start update segment keyword index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()

    segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
    if not segment:
        raise NotFound('Segment not found')

    indexing_cache_key = 'segment_{}_indexing'.format(segment.id)

    try:
        dataset = segment.dataset

        if not dataset:
            logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
            return

        dataset_document = segment.document

        if not dataset_document:
            logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
            return

        if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
            logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
            return

        kw_index = IndexBuilder.get_index(dataset, 'economy')

        # delete from keyword index
        kw_index.delete_by_ids([segment.index_node_id])

        # add new index
        document = Document(
            page_content=segment.content,
            metadata={
                "doc_id": segment.index_node_id,
                "doc_hash": segment.index_node_hash,
                "document_id": segment.document_id,
                "dataset_id": segment.dataset_id,
            }
        )

        # save keyword index
        index = IndexBuilder.get_index(dataset, 'economy')
        if index:
            index.update_segment_keywords_index(segment.index_node_id, segment.keywords)

        end_at = time.perf_counter()
        logging.info(click.style('Segment update index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("update segment index failed")
        segment.enabled = False
        segment.disabled_at = datetime.datetime.utcnow()
        segment.status = 'error'
        segment.error = str(e)
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
@@ -0,0 +1,52 @@
import { forwardRef, useEffect, useRef } from 'react'
import cn from 'classnames'

type AutoHeightTextareaProps =
  & React.DetailedHTMLProps<React.TextareaHTMLAttributes<HTMLTextAreaElement>, HTMLTextAreaElement>
  & { outerClassName?: string }

const AutoHeightTextarea = forwardRef<HTMLTextAreaElement, AutoHeightTextareaProps>(
  (
    {
      outerClassName,
      value,
      className,
      placeholder,
      autoFocus,
      disabled,
      ...rest
    },
    outRef,
  ) => {
    const innerRef = useRef<HTMLTextAreaElement>(null)
    const ref = outRef || innerRef

    useEffect(() => {
      if (autoFocus && !disabled && value) {
        if (typeof ref !== 'function') {
          ref.current?.setSelectionRange(`${value}`.length, `${value}`.length)
          ref.current?.focus()
        }
      }
    }, [autoFocus, disabled, ref])
    return (
      <div className={outerClassName}>
        <div className='relative'>
          <div className={cn(className, 'invisible whitespace-pre-wrap break-all')}>
            {!value ? placeholder : `${value}`.replace(/\n$/, '\n ')}
          </div>
          <textarea
            ref={ref}
            placeholder={placeholder}
            className={cn(className, 'disabled:bg-transparent absolute inset-0 outline-none border-none appearance-none resize-none')}
            value={value}
            disabled={disabled}
            {...rest}
          />
        </div>
      </div>
    )
  },
)

export default AutoHeightTextarea
@@ -0,0 +1,4 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.77438 6.6665H12.5591C12.9105 6.66649 13.2137 6.66648 13.4634 6.68688C13.727 6.70842 13.9891 6.75596 14.2414 6.88449C14.6177 7.07624 14.9237 7.3822 15.1154 7.75852C15.244 8.01078 15.2915 8.27292 15.313 8.53649C15.3334 8.7862 15.3334 9.08938 15.3334 9.44082V11.2974C15.3334 11.5898 15.3334 11.8421 15.3192 12.0509C15.3042 12.2708 15.2712 12.4908 15.1812 12.7081C14.9782 13.1981 14.5888 13.5875 14.0988 13.7905C13.8815 13.8805 13.6616 13.9135 13.4417 13.9285C13.4068 13.9308 13.3707 13.9328 13.3334 13.9345V14.6665C13.3334 14.9147 13.1955 15.1424 12.9756 15.2573C12.7556 15.3723 12.49 15.3556 12.2862 15.2139L10.8353 14.2051C10.6118 14.0498 10.5666 14.0214 10.5238 14.0021C10.4746 13.9798 10.4228 13.9635 10.3696 13.9537C10.3235 13.9452 10.2702 13.9427 9.99803 13.9427H8.7744C8.42296 13.9427 8.11978 13.9427 7.87006 13.9223C7.6065 13.9008 7.34435 13.8532 7.0921 13.7247C6.71578 13.533 6.40981 13.227 6.21807 12.8507C6.08954 12.5984 6.04199 12.3363 6.02046 12.0727C6.00006 11.823 6.00007 11.5198 6.00008 11.1684V9.44081C6.00007 9.08938 6.00006 8.7862 6.02046 8.53649C6.04199 8.27292 6.08954 8.01078 6.21807 7.75852C6.40981 7.3822 6.71578 7.07624 7.0921 6.88449C7.34435 6.75596 7.6065 6.70842 7.87006 6.68688C8.11978 6.66648 8.42295 6.66649 8.77438 6.6665Z" fill="#444CE7"/>
<path d="M9.4943 0.666504H4.5059C3.96926 0.666496 3.52635 0.666489 3.16555 0.695967C2.79082 0.726584 2.44635 0.792293 2.12279 0.957154C1.62103 1.21282 1.21308 1.62076 0.957417 2.12253C0.792557 2.44609 0.726847 2.79056 0.69623 3.16529C0.666752 3.52608 0.666759 3.96899 0.666768 4.50564L0.666758 7.6804C0.666669 7.97482 0.666603 8.19298 0.694924 8.38632C0.86568 9.55207 1.78121 10.4676 2.94695 10.6383C2.99461 10.6453 3.02432 10.6632 3.03714 10.6739L3.03714 11.7257C3.03711 11.9075 3.03708 12.0858 3.04976 12.2291C3.06103 12.3565 3.09053 12.6202 3.27795 12.8388C3.48686 13.0825 3.80005 13.2111 4.11993 13.1845C4.40689 13.1607 4.61323 12.9938 4.71072 12.9111C4.73849 12.8875 4.76726 12.8618 4.7968 12.8344C4.73509 12.594 4.70707 12.3709 4.69157 12.1813C4.66659 11.8756 4.66668 11.5224 4.66676 11.1966V9.41261C4.66668 9.08685 4.66659 8.73364 4.69157 8.42793C4.71984 8.08191 4.78981 7.62476 5.03008 7.15322C5.34965 6.52601 5.85959 6.01608 6.4868 5.6965C6.95834 5.45624 7.41549 5.38627 7.7615 5.358C8.06722 5.33302 8.42041 5.3331 8.74617 5.33318H12.5873C12.8311 5.33312 13.0903 5.33306 13.3334 5.3435V4.50562C13.3334 3.96898 13.3334 3.52608 13.304 3.16529C13.2734 2.79056 13.2076 2.44609 13.0428 2.12253C12.7871 1.62076 12.3792 1.21282 11.8774 0.957154C11.5539 0.792293 11.2094 0.726584 10.8347 0.695967C10.4739 0.666489 10.0309 0.666496 9.4943 0.666504Z" fill="#444CE7"/>
</svg>

After Width: | Height: | Size: 2.7 KiB
@@ -0,0 +1,5 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Icon">
<path id="Icon_2" d="M7.99998 13.3332H14M2 13.3332H3.11636C3.44248 13.3332 3.60554 13.3332 3.75899 13.2963C3.89504 13.2637 4.0251 13.2098 4.1444 13.1367C4.27895 13.0542 4.39425 12.9389 4.62486 12.7083L13 4.33316C13.5523 3.78087 13.5523 2.88544 13 2.33316C12.4477 1.78087 11.5523 1.78087 11 2.33316L2.62484 10.7083C2.39424 10.9389 2.27894 11.0542 2.19648 11.1888C2.12338 11.3081 2.0695 11.4381 2.03684 11.5742C2 11.7276 2 11.8907 2 12.2168V13.3332Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>

After Width: | Height: | Size: 656 B
@@ -0,0 +1,5 @@
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="hash-02">
<path id="Icon" d="M4.74999 1.5L3.24999 10.5M8.74998 1.5L7.24998 10.5M10.25 4H1.75M9.75 8H1.25" stroke="#98A2B3" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>

After Width: | Height: | Size: 287 B
@@ -0,0 +1,37 @@
{
  "icon": {
    "type": "element",
    "isRootNode": true,
    "name": "svg",
    "attributes": {
      "width": "16",
      "height": "16",
      "viewBox": "0 0 16 16",
      "fill": "none",
      "xmlns": "http://www.w3.org/2000/svg"
    },
    "children": [
      {
        "type": "element",
        "name": "path",
        "attributes": {
          "fill-rule": "evenodd",
          "clip-rule": "evenodd",
          "d": "M8.77438 6.6665H12.5591C12.9105 6.66649 13.2137 6.66648 13.4634 6.68688C13.727 6.70842 13.9891 6.75596 14.2414 6.88449C14.6177 7.07624 14.9237 7.3822 15.1154 7.75852C15.244 8.01078 15.2915 8.27292 15.313 8.53649C15.3334 8.7862 15.3334 9.08938 15.3334 9.44082V11.2974C15.3334 11.5898 15.3334 11.8421 15.3192 12.0509C15.3042 12.2708 15.2712 12.4908 15.1812 12.7081C14.9782 13.1981 14.5888 13.5875 14.0988 13.7905C13.8815 13.8805 13.6616 13.9135 13.4417 13.9285C13.4068 13.9308 13.3707 13.9328 13.3334 13.9345V14.6665C13.3334 14.9147 13.1955 15.1424 12.9756 15.2573C12.7556 15.3723 12.49 15.3556 12.2862 15.2139L10.8353 14.2051C10.6118 14.0498 10.5666 14.0214 10.5238 14.0021C10.4746 13.9798 10.4228 13.9635 10.3696 13.9537C10.3235 13.9452 10.2702 13.9427 9.99803 13.9427H8.7744C8.42296 13.9427 8.11978 13.9427 7.87006 13.9223C7.6065 13.9008 7.34435 13.8532 7.0921 13.7247C6.71578 13.533 6.40981 13.227 6.21807 12.8507C6.08954 12.5984 6.04199 12.3363 6.02046 12.0727C6.00006 11.823 6.00007 11.5198 6.00008 11.1684V9.44081C6.00007 9.08938 6.00006 8.7862 6.02046 8.53649C6.04199 8.27292 6.08954 8.01078 6.21807 7.75852C6.40981 7.3822 6.71578 7.07624 7.0921 6.88449C7.34435 6.75596 7.6065 6.70842 7.87006 6.68688C8.11978 6.66648 8.42295 6.66649 8.77438 6.6665Z",
          "fill": "#444CE7"
        },
        "children": []
      },
      {
        "type": "element",
        "name": "path",
        "attributes": {
          "d": "M9.4943 0.666504H4.5059C3.96926 0.666496 3.52635 0.666489 3.16555 0.695967C2.79082 0.726584 2.44635 0.792293 2.12279 0.957154C1.62103 1.21282 1.21308 1.62076 0.957417 2.12253C0.792557 2.44609 0.726847 2.79056 0.69623 3.16529C0.666752 3.52608 0.666759 3.96899 0.666768 4.50564L0.666758 7.6804C0.666669 7.97482 0.666603 8.19298 0.694924 8.38632C0.86568 9.55207 1.78121 10.4676 2.94695 10.6383C2.99461 10.6453 3.02432 10.6632 3.03714 10.6739L3.03714 11.7257C3.03711 11.9075 3.03708 12.0858 3.04976 12.2291C3.06103 12.3565 3.09053 12.6202 3.27795 12.8388C3.48686 13.0825 3.80005 13.2111 4.11993 13.1845C4.40689 13.1607 4.61323 12.9938 4.71072 12.9111C4.73849 12.8875 4.76726 12.8618 4.7968 12.8344C4.73509 12.594 4.70707 12.3709 4.69157 12.1813C4.66659 11.8756 4.66668 11.5224 4.66676 11.1966V9.41261C4.66668 9.08685 4.66659 8.73364 4.69157 8.42793C4.71984 8.08191 4.78981 7.62476 5.03008 7.15322C5.34965 6.52601 5.85959 6.01608 6.4868 5.6965C6.95834 5.45624 7.41549 5.38627 7.7615 5.358C8.06722 5.33302 8.42041 5.3331 8.74617 5.33318H12.5873C12.8311 5.33312 13.0903 5.33306 13.3334 5.3435V4.50562C13.3334 3.96898 13.3334 3.52608 13.304 3.16529C13.2734 2.79056 13.2076 2.44609 13.0428 2.12253C12.7871 1.62076 12.3792 1.21282 11.8774 0.957154C11.5539 0.792293 11.2094 0.726584 10.8347 0.695967C10.4739 0.666489 10.0309 0.666496 9.4943 0.666504Z",
          "fill": "#444CE7"
        },
        "children": []
      }
    ]
  },
  "name": "MessageChatSquare"
}
@@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './MessageChatSquare.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon
@@ -1,2 +1,3 @@
export { default as Dify } from './Dify'
export { default as Github } from './Github'
export { default as MessageChatSquare } from './MessageChatSquare'
@@ -0,0 +1,39 @@
{
  "icon": {
    "type": "element",
    "isRootNode": true,
    "name": "svg",
    "attributes": {
      "width": "16",
      "height": "16",
      "viewBox": "0 0 16 16",
      "fill": "none",
      "xmlns": "http://www.w3.org/2000/svg"
    },
    "children": [
      {
        "type": "element",
        "name": "g",
        "attributes": {
          "id": "Icon"
        },
        "children": [
          {
            "type": "element",
            "name": "path",
            "attributes": {
              "id": "Icon_2",
              "d": "M7.99998 13.3332H14M2 13.3332H3.11636C3.44248 13.3332 3.60554 13.3332 3.75899 13.2963C3.89504 13.2637 4.0251 13.2098 4.1444 13.1367C4.27895 13.0542 4.39425 12.9389 4.62486 12.7083L13 4.33316C13.5523 3.78087 13.5523 2.88544 13 2.33316C12.4477 1.78087 11.5523 1.78087 11 2.33316L2.62484 10.7083C2.39424 10.9389 2.27894 11.0542 2.19648 11.1888C2.12338 11.3081 2.0695 11.4381 2.03684 11.5742C2 11.7276 2 11.8907 2 12.2168V13.3332Z",
              "stroke": "currentColor",
              "stroke-width": "1.5",
              "stroke-linecap": "round",
              "stroke-linejoin": "round"
            },
            "children": []
          }
        ]
      }
    ]
  },
  "name": "Edit03"
}
@@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './Edit03.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon
@@ -0,0 +1,38 @@
{
  "icon": {
    "type": "element",
    "isRootNode": true,
    "name": "svg",
    "attributes": {
      "width": "12",
      "height": "12",
      "viewBox": "0 0 12 12",
      "fill": "none",
      "xmlns": "http://www.w3.org/2000/svg"
    },
    "children": [
      {
        "type": "element",
        "name": "g",
        "attributes": {
          "id": "hash-02"
        },
        "children": [
          {
            "type": "element",
            "name": "path",
            "attributes": {
              "id": "Icon",
              "d": "M4.74999 1.5L3.24999 10.5M8.74998 1.5L7.24998 10.5M10.25 4H1.75M9.75 8H1.25",
              "stroke": "currentColor",
              "stroke-linecap": "round",
              "stroke-linejoin": "round"
            },
            "children": []
          }
        ]
      }
    ]
  },
  "name": "Hash02"
}
@@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './Hash02.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon
@@ -0,0 +1,140 @@
import { memo, useState } from 'react'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import { useParams } from 'next/navigation'
import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button'
import AutoHeightTextarea from '@/app/components/base/auto-height-textarea/common'
import { Hash02, XClose } from '@/app/components/base/icons/src/vender/line/general'
import { ToastContext } from '@/app/components/base/toast'
import type { SegmentUpdator } from '@/models/datasets'
import { addSegment } from '@/service/datasets'

type NewSegmentModalProps = {
  isShow: boolean
  onCancel: () => void
  docForm: string
  onSave: () => void
}

const NewSegmentModal: FC<NewSegmentModalProps> = memo(({
  isShow,
  onCancel,
  docForm,
  onSave,
}) => {
  const { t } = useTranslation()
  const { notify } = useContext(ToastContext)
  const [question, setQuestion] = useState('')
  const [answer, setAnswer] = useState('')
  const { datasetId, documentId } = useParams()

  const handleCancel = () => {
    setQuestion('')
    setAnswer('')
    onCancel()
  }

  const handleSave = async () => {
    const params: SegmentUpdator = { content: '' }
    if (docForm === 'qa_model') {
      if (!question.trim())
        return notify({ type: 'error', message: t('datasetDocuments.segment.questionEmpty') })
      if (!answer.trim())
        return notify({ type: 'error', message: t('datasetDocuments.segment.answerEmpty') })

      params.content = question
      params.answer = answer
    }
    else {
      if (!question.trim())
        return notify({ type: 'error', message: t('datasetDocuments.segment.contentEmpty') })

      params.content = question
    }

    await addSegment({ datasetId, documentId, body: params })
    notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
    handleCancel()
    onSave()
  }

  const renderContent = () => {
    if (docForm === 'qa_model') {
      return (
        <>
          <div className='mb-1 text-xs font-medium text-gray-500'>QUESTION</div>
          <AutoHeightTextarea
            outerClassName='mb-4'
            className='leading-6 text-md text-gray-800'
            value={question}
            placeholder={t('datasetDocuments.segment.questionPlaceholder') || ''}
            onChange={e => setQuestion(e.target.value)}
            autoFocus
          />
          <div className='mb-1 text-xs font-medium text-gray-500'>ANSWER</div>
          <AutoHeightTextarea
            outerClassName='mb-4'
            className='leading-6 text-md text-gray-800'
            value={answer}
            placeholder={t('datasetDocuments.segment.answerPlaceholder') || ''}
            onChange={e => setAnswer(e.target.value)}
          />
        </>
      )
    }

    return (
      <AutoHeightTextarea
        className='leading-6 text-md text-gray-800'
        value={question}
        placeholder={t('datasetDocuments.segment.contentPlaceholder') || ''}
        onChange={e => setQuestion(e.target.value)}
        autoFocus
      />
    )
  }

  return (
    <Modal isShow={isShow} onClose={() => {}} className='pt-8 px-8 pb-6 !max-w-[640px] !rounded-xl'>
      <div className={'flex flex-col relative'}>
        <div className='absolute right-0 -top-0.5 flex items-center h-6'>
          <div className='flex justify-center items-center w-6 h-6 cursor-pointer' onClick={handleCancel}>
            <XClose className='w-4 h-4 text-gray-500' />
          </div>
        </div>
        <div className='mb-[14px]'>
          <span className='inline-flex items-center px-1.5 h-5 border border-gray-200 rounded-md'>
            <Hash02 className='mr-0.5 w-3 h-3 text-gray-400' />
            <span className='text-[11px] font-medium text-gray-500 italic'>
              {
                docForm === 'qa_model'
                  ? t('datasetDocuments.segment.newQaSegment')
                  : t('datasetDocuments.segment.newTextSegment')
              }
            </span>
          </span>
        </div>
        <div className='mb-4 py-1.5 h-[420px] overflow-auto'>{renderContent()}</div>
        <div className='mb-2 text-xs font-medium text-gray-500'>{t('datasetDocuments.segment.keywords')}</div>
        <div className='mb-8'></div>
        <div className='flex justify-end'>
          <Button
            className='mr-2 !h-9 !px-4 !py-2 text-sm font-medium text-gray-700 !rounded-lg'
            onClick={handleCancel}>
            {t('common.operation.cancel')}
          </Button>
          <Button
            type='primary'
            className='!h-9 !px-4 !py-2 text-sm font-medium !rounded-lg'
            onClick={handleSave}>
            {t('common.operation.save')}
          </Button>
        </div>
      </div>
    </Modal>
  )
})

export default NewSegmentModal