
Remove unnecessary langsmith dependency #369

Merged · 9 commits · Aug 18, 2024
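This PR drops `langsmith` as a dependency across the repo: every `from langsmith import traceable` import and `@traceable(...)` decorator is removed from the microservice modules, and the `langsmith` pin is removed from each affected `requirements.txt`. The `register_microservice` registration path is untouched. As a minimal sketch of what a service looks like after the change (adapted from the README example below; the `service_type`, endpoint, and port values are illustrative assumptions, not taken from this diff):

```python
from langchain_community.embeddings import HuggingFaceHubEmbeddings

from comps import EmbedDoc, ServiceType, TextDoc, opea_microservices, register_microservice

# Illustrative endpoint: the real services read TEI_EMBEDDING_ENDPOINT
# from the environment instead of hard-coding a URL.
embeddings = HuggingFaceHubEmbeddings(model="http://localhost:8080")


@register_microservice(
    name="opea_service@embedding_tei_langchain",
    service_type=ServiceType.EMBEDDING,  # assumed value for illustration
    endpoint="/v1/embeddings",  # assumed endpoint
    host="0.0.0.0",
    port=6000,  # assumed port
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
def embedding(input: TextDoc) -> EmbedDoc:
    # A @traceable(run_type="embedding") decorator used to sit between
    # register_microservice and this function; it is gone along with the
    # langsmith import.
    embed_vector = embeddings.embed_query(input.text)
    return EmbedDoc(text=input.text, embedding=embed_vector)


if __name__ == "__main__":
    opea_microservices["opea_service@embedding_tei_langchain"].start()
```

Aside from these removals, the only other change in the diff is a log-message fix in `comps/embeddings/langchain/embedding_tei.py`: "TEI Gaudi Embedding initialized." becomes "TEI Embedding initialized.", presumably because this service is not Gaudi-specific.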
2 changes: 0 additions & 2 deletions README.md
@@ -173,7 +173,6 @@ A `Microservices` can be created by using the decorator `register_microservice`.

```python
from langchain_community.embeddings import HuggingFaceHubEmbeddings
-from langsmith import traceable

from comps import register_microservice, EmbedDoc, ServiceType, TextDoc

@@ -187,7 +186,6 @@ from comps import register_microservice, EmbedDoc, ServiceType, TextDoc
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
-@traceable(run_type="embedding")
def embedding(input: TextDoc) -> EmbedDoc:
    embed_vector = embeddings.embed_query(input.text)
    res = EmbedDoc(text=input.text, embedding=embed_vector)
1 change: 0 additions & 1 deletion comps/agent/langchain/requirements.txt
@@ -11,7 +11,6 @@ langchain-openai
langchain_community
langchainhub
langgraph
-langsmith
numpy

# used by cloud native
2 changes: 0 additions & 2 deletions comps/dataprep/milvus/prepare_doc_milvus.py
@@ -21,7 +21,6 @@
from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceHubEmbeddings, OpenAIEmbeddings
from langchain_milvus.vectorstores import Milvus
from langchain_text_splitters import HTMLHeaderTextSplitter
-from langsmith import traceable
from pyspark import SparkConf, SparkContext

from comps import DocPath, opea_microservices, register_microservice
@@ -160,7 +159,6 @@ def ingest_link_to_milvus(link_list: List[str]):


@register_microservice(name="opea_service@prepare_doc_milvus", endpoint="/v1/dataprep", host="0.0.0.0", port=6010)
-@traceable(run_type="tool")
async def ingest_documents(
    files: Optional[Union[UploadFile, List[UploadFile]]] = File(None),
    link_list: Optional[str] = Form(None),
1 change: 0 additions & 1 deletion comps/dataprep/milvus/requirements.txt
@@ -9,7 +9,6 @@ langchain
langchain-community
langchain-text-splitters
langchain_milvus
-langsmith
markdown
numpy
openai
2 changes: 0 additions & 2 deletions comps/dataprep/pgvector/langchain/prepare_doc_pgvector.py
@@ -12,7 +12,6 @@
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceHubEmbeddings
from langchain_community.vectorstores import PGVector
-from langsmith import traceable

from comps import DocPath, ServiceType, opea_microservices, register_microservice, register_statistics
from comps.dataprep.utils import document_loader, get_separators, parse_html
@@ -101,7 +100,6 @@ def ingest_link_to_pgvector(link_list: List[str]):
    host="0.0.0.0",
    port=6007,
)
-@traceable(run_type="tool")
@register_statistics(names=["opea_service@dataprep_pgvector"])
async def ingest_documents(
    files: Optional[Union[UploadFile, List[UploadFile]]] = File(None), link_list: Optional[str] = Form(None)
1 change: 0 additions & 1 deletion comps/dataprep/pgvector/langchain/requirements.txt
@@ -7,7 +7,6 @@ fastapi
huggingface_hub
langchain
langchain-community
-langsmith
markdown
numpy
opentelemetry-api
1 change: 0 additions & 1 deletion comps/dataprep/pinecone/requirements.txt
@@ -6,7 +6,6 @@ huggingface_hub
langchain
langchain-community
langchain-pinecone
-langsmith
numpy
opentelemetry-api
opentelemetry-exporter-otlp
4 changes: 0 additions & 4 deletions comps/dataprep/redis/langchain/prepare_doc_redis.py
@@ -16,7 +16,6 @@
from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceHubEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import HTMLHeaderTextSplitter
-from langsmith import traceable
from redis.commands.search.field import TextField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType

@@ -206,7 +205,6 @@ async def ingest_link_to_redis(link_list: List[str]):


@register_microservice(name="opea_service@prepare_doc_redis", endpoint="/v1/dataprep", host="0.0.0.0", port=6007)
-@traceable(run_type="tool")
async def ingest_documents(
    files: Optional[Union[UploadFile, List[UploadFile]]] = File(None),
    link_list: Optional[str] = Form(None),
@@ -279,7 +277,6 @@ async def ingest_documents(
@register_microservice(
    name="opea_service@prepare_doc_redis_file", endpoint="/v1/dataprep/get_file", host="0.0.0.0", port=6008
)
-@traceable(run_type="tool")
async def rag_get_file_structure():
    print("[ dataprep - get file ] start to get file structure")

@@ -294,7 +291,6 @@ async def rag_get_file_structure():
@register_microservice(
    name="opea_service@prepare_doc_redis_del", endpoint="/v1/dataprep/delete_file", host="0.0.0.0", port=6009
)
-@traceable(run_type="tool")
async def delete_single_file(file_path: str = Body(..., embed=True)):
    """Delete file according to `file_path`.

1 change: 0 additions & 1 deletion comps/dataprep/redis/langchain/requirements.txt
@@ -8,7 +8,6 @@ huggingface_hub
langchain
langchain-community
langchain-text-splitters
-langsmith
markdown
numpy
opentelemetry-api
@@ -26,7 +26,6 @@
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceHubEmbeddings
from langchain_community.vectorstores import Redis
-from langsmith import traceable

cur_path = pathlib.Path(__file__).parent.resolve()
comps_path = os.path.join(cur_path, "../../../../")
@@ -322,7 +321,6 @@ async def ingest_documents(files: List[UploadFile] = File(None), link_list: str
@register_microservice(
    name="opea_service@prepare_doc_redis_file", endpoint="/v1/dataprep/get_file", host="0.0.0.0", port=6008
)
-@traceable(run_type="tool")
async def rag_get_file_structure():
    print("[ get_file_structure] ")

@@ -337,7 +335,6 @@ async def rag_get_file_structure():
@register_microservice(
    name="opea_service@prepare_doc_redis_del", endpoint="/v1/dataprep/delete_file", host="0.0.0.0", port=6009
)
-@traceable(run_type="tool")
async def delete_single_file(file_path: str = Body(..., embed=True)):
    """Delete file according to `file_path`.

1 change: 0 additions & 1 deletion comps/dataprep/redis/langchain_ray/requirements.txt
@@ -7,7 +7,6 @@ fastapi
huggingface_hub
langchain
langchain-community
-langsmith
numpy
opentelemetry-api
opentelemetry-exporter-otlp
4 changes: 0 additions & 4 deletions comps/dataprep/redis/llama_index/prepare_doc_redis.py
@@ -8,7 +8,6 @@

from config import EMBED_MODEL, INDEX_NAME, REDIS_URL
from fastapi import Body, File, HTTPException, UploadFile
-from langsmith import traceable
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.core.settings import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
@@ -55,7 +54,6 @@ async def ingest_data_to_redis(doc_path: DocPath):


@register_microservice(name="opea_service@prepare_doc_redis", endpoint="/v1/dataprep", host="0.0.0.0", port=6007)
-@traceable(run_type="tool")
# llama index only support upload files now
async def ingest_documents(files: Optional[Union[UploadFile, List[UploadFile]]] = File(None)):
    print(f"files:{files}")
@@ -81,7 +79,6 @@ async def ingest_documents(files: Optional[Union[UploadFile, List[UploadFile]]]
@register_microservice(
    name="opea_service@prepare_doc_redis_file", endpoint="/v1/dataprep/get_file", host="0.0.0.0", port=6008
)
-@traceable(run_type="tool")
async def rag_get_file_structure():
    print("[ get_file_structure] ")

@@ -96,7 +93,6 @@ async def rag_get_file_structure():
@register_microservice(
    name="opea_service@prepare_doc_redis_del", endpoint="/v1/dataprep/delete_file", host="0.0.0.0", port=6009
)
-@traceable(run_type="tool")
async def delete_single_file(file_path: str = Body(..., embed=True)):
    """Delete file according to `file_path`.

1 change: 0 additions & 1 deletion comps/dataprep/redis/llama_index/requirements.txt
@@ -1,7 +1,6 @@
docarray[full]
fastapi
huggingface_hub
-langsmith
llama-index
llama-index-embeddings-huggingface==0.2.0
llama-index-readers-file
2 changes: 0 additions & 2 deletions comps/embeddings/langchain-mosec/embedding_mosec.py
@@ -6,7 +6,6 @@
from typing import List, Optional

from langchain_community.embeddings import OpenAIEmbeddings
-from langsmith import traceable

from comps import (
EmbedDoc,
@@ -53,7 +52,6 @@ def empty_embedding() -> List[float]:
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
-@traceable(run_type="embedding")
@register_statistics(names=["opea_service@embedding_mosec"])
def embedding(input: TextDoc) -> EmbedDoc:
    start = time.time()
4 changes: 1 addition & 3 deletions comps/embeddings/langchain/embedding_tei.py
@@ -5,7 +5,6 @@
import time

from langchain_community.embeddings import HuggingFaceHubEmbeddings
-from langsmith import traceable

from comps import (
EmbedDoc,
@@ -27,7 +26,6 @@
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
-@traceable(run_type="embedding")
@register_statistics(names=["opea_service@embedding_tei_langchain"])
def embedding(input: TextDoc) -> EmbedDoc:
    start = time.time()
@@ -40,5 +38,5 @@ def embedding(input: TextDoc) -> EmbedDoc:
if __name__ == "__main__":
    tei_embedding_endpoint = os.getenv("TEI_EMBEDDING_ENDPOINT", "http://localhost:8080")
    embeddings = HuggingFaceHubEmbeddings(model=tei_embedding_endpoint)
-    print("TEI Gaudi Embedding initialized.")
+    print("TEI Embedding initialized.")
    opea_microservices["opea_service@embedding_tei_langchain"].start()
1 change: 0 additions & 1 deletion comps/embeddings/langchain/requirements.txt
@@ -2,7 +2,6 @@ docarray[full]
fastapi
huggingface_hub
langchain
-langsmith
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-sdk
2 changes: 0 additions & 2 deletions comps/embeddings/llama_index/embedding_tei.py
@@ -3,7 +3,6 @@

import os

-from langsmith import traceable
from llama_index.embeddings.text_embeddings_inference import TextEmbeddingsInference

from comps import EmbedDoc, ServiceType, TextDoc, opea_microservices, register_microservice
@@ -18,7 +17,6 @@
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
-@traceable(run_type="embedding")
def embedding(input: TextDoc) -> EmbedDoc:
    embed_vector = embeddings._get_query_embedding(input.text)
    res = EmbedDoc(text=input.text, embedding=embed_vector)
2 changes: 0 additions & 2 deletions comps/embeddings/llama_index/local_embedding.py
@@ -1,7 +1,6 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

-from langsmith import traceable
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

from comps import EmbedDoc, ServiceType, TextDoc, opea_microservices, register_microservice
@@ -16,7 +15,6 @@
    input_datatype=TextDoc,
    output_datatype=EmbedDoc,
)
-@traceable(run_type="embedding")
def embedding(input: TextDoc) -> EmbedDoc:
    embed_vector = embeddings.get_text_embedding(input.text)
    res = EmbedDoc(text=input.text, embedding=embed_vector)
1 change: 0 additions & 1 deletion comps/embeddings/llama_index/requirements.txt
@@ -1,7 +1,6 @@
docarray[full]
fastapi
huggingface_hub
-langsmith
llama-index-embeddings-text-embeddings-inference
opentelemetry-api
opentelemetry-exporter-otlp
2 changes: 0 additions & 2 deletions comps/guardrails/langchain/guardrails_tgi_gaudi.py
@@ -6,7 +6,6 @@
from langchain_community.utilities.requests import JsonRequestsWrapper
from langchain_huggingface import ChatHuggingFace
from langchain_huggingface.llms import HuggingFaceEndpoint
-from langsmith import traceable

from comps import ServiceType, TextDoc, opea_microservices, register_microservice

@@ -62,7 +61,6 @@ def get_tgi_service_model_id(endpoint_url, default=DEFAULT_MODEL):
    input_datatype=TextDoc,
    output_datatype=TextDoc,
)
-@traceable(run_type="llm")
def safety_guard(input: TextDoc) -> TextDoc:
    response_input_guard = llm_engine_hf.invoke([{"role": "user", "content": input.text}]).content
    if "unsafe" in response_input_guard:
1 change: 0 additions & 1 deletion comps/guardrails/pii_detection/pii_detection.py
@@ -8,7 +8,6 @@
from pathlib import Path

from fastapi import File, Form, HTTPException, UploadFile
-from langsmith import traceable

cur_path = pathlib.Path(__file__).parent.resolve()
comps_path = os.path.join(cur_path, "../../../")
1 change: 0 additions & 1 deletion comps/guardrails/pii_detection/requirements.txt
@@ -7,7 +7,6 @@ gibberish-detector
huggingface_hub
langchain
langchain-community
-langsmith
numpy
opentelemetry-api
opentelemetry-exporter-otlp
1 change: 0 additions & 1 deletion comps/guardrails/requirements.txt
@@ -3,7 +3,6 @@ fastapi
huggingface_hub
langchain-community
langchain-huggingface
-langsmith
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-sdk
1 change: 0 additions & 1 deletion comps/knowledgegraphs/langchain/knowledge_graph.py
@@ -22,7 +22,6 @@
from langchain_community.graphs import Neo4jGraph
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores.neo4j_vector import Neo4jVector
-from langsmith import traceable

from comps import GeneratedDoc, GraphDoc, ServiceType, opea_microservices, register_microservice

1 change: 0 additions & 1 deletion comps/knowledgegraphs/requirements.txt
@@ -8,7 +8,6 @@ langchain
langchain_community==0.2.5
langchain_openai
langchainhub
-langsmith
neo4j
numpy
opentelemetry-api
3 changes: 0 additions & 3 deletions comps/llms/faq-generation/tgi/llm.py
@@ -9,12 +9,10 @@
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.llms import HuggingFaceEndpoint
-from langsmith import traceable

from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice


-@traceable(run_type="tool")
def post_process_text(text: str):
    if text == " ":
        return "data: @#$\n\n"
@@ -33,7 +31,6 @@ def post_process_text(text: str):
    host="0.0.0.0",
    port=9000,
)
-@traceable(run_type="llm")
def llm_generate(input: LLMParamsDoc):
    llm_endpoint = os.getenv("TGI_LLM_ENDPOINT", "http://localhost:8080")
    llm = HuggingFaceEndpoint(
1 change: 0 additions & 1 deletion comps/llms/faq-generation/tgi/requirements.txt
@@ -3,7 +3,6 @@ fastapi
huggingface_hub
langchain==0.1.16
langserve
-langsmith
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-sdk
3 changes: 0 additions & 3 deletions comps/llms/summarization/tgi/llm.py
@@ -8,12 +8,10 @@
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from langchain_huggingface import HuggingFaceEndpoint
-from langsmith import traceable

from comps import GeneratedDoc, LLMParamsDoc, ServiceType, opea_microservices, register_microservice


-@traceable(run_type="tool")
def post_process_text(text: str):
    if text == " ":
        return "data: @#$\n\n"
@@ -32,7 +30,6 @@ def post_process_text(text: str):
    host="0.0.0.0",
    port=9000,
)
-@traceable(run_type="llm")
def llm_generate(input: LLMParamsDoc):
    llm_endpoint = os.getenv("TGI_LLM_ENDPOINT", "http://localhost:8080")
    llm = HuggingFaceEndpoint(
1 change: 0 additions & 1 deletion comps/llms/summarization/tgi/requirements.txt
@@ -6,7 +6,6 @@ langchain-huggingface
langchain-openai
langchain_community
langchainhub
-langsmith
opentelemetry-api
opentelemetry-exporter-otlp
opentelemetry-sdk