diff --git a/src/backend/base/langflow/components/vectorstores/astradb.py b/src/backend/base/langflow/components/vectorstores/astradb.py
index 73aa594941be..90abf7941084 100644
--- a/src/backend/base/langflow/components/vectorstores/astradb.py
+++ b/src/backend/base/langflow/components/vectorstores/astradb.py
@@ -478,9 +478,10 @@ def update_build_config(self, build_config: dict, field_value: str, field_name:
         # Define variables for common database conditions a user may experience
         is_hosted = os.getenv("LANGFLOW_HOST") is not None
         no_databases = "options" not in build_config["api_endpoint"] or not build_config["api_endpoint"]["options"]
+        no_api_endpoint = not build_config["api_endpoint"]["value"]
 
         # Refresh the database name options
-        if not is_hosted and (field_name in ["token", "environment"] or no_databases):
+        if not is_hosted and (field_name in ["token", "environment"] or (no_databases and no_api_endpoint)):
             # Get the list of options we have based on the token provided
             database_options = self._initialize_database_options()
 
@@ -524,11 +525,8 @@ def update_build_config(self, build_config: dict, field_value: str, field_name:
             ] = self.map_cloud_providers()[cloud_provider]["regions"]
         """
 
-        # Define variables for common collection conditions a user may experience
-        no_collections = not build_config["collection_name"]["options"]
-
         # Refresh the collection name options
-        if field_name == "api_endpoint" or (field_name == "collection_name" and no_collections):
+        if field_name == "api_endpoint":
             # Reset the selected collection
             build_config["collection_name"]["value"] = ""
 
@@ -539,11 +537,15 @@ def update_build_config(self, build_config: dict, field_value: str, field_name:
                 {k: v for k, v in col.items() if k not in ["name"]} for col in collection_options
             ]
 
-        # Define variables for common collection choice conditions a user may experience
-        collection_chosen = field_value and build_config["collection_name"]["options"]
-
         # Hide embedding model option if options_metadata provider is not null
-        if field_name == "collection_name" and collection_chosen:
+        if field_name == "collection_name" and field_value:
+            # Set the options for collection name to be the field value if it's a new collection
+            if not is_hosted and field_value not in build_config["collection_name"]["options"]:
+                build_config["collection_name"]["options"].append(field_value)
+                build_config["collection_name"]["options_metadata"].append(
+                    {"records": 0, "provider": None, "icon": "", "model": None}
+                )
+
             # Find location of the name in the options list
             index_of_name = build_config["collection_name"]["options"].index(field_value)
             value_of_provider = build_config["collection_name"]["options_metadata"][index_of_name]["provider"]
diff --git a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
index 605afc798eea..12058b461860 100644
--- a/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
+++ b/src/backend/base/langflow/initial_setup/starter_projects/Vector Store RAG.json
@@ -7,7 +7,7 @@ "data": { "sourceHandle": { "dataType": "ParseData", - "id": "ParseData-BUk4L", + "id": "ParseData-rM0er", "name": "text", "output_types": [ "Message" ] }, "targetHandle": { "fieldName": "context", - "id": "Prompt-eDtym", + "id": "Prompt-0IrIN", "inputTypes": [ "Message", "Text" ], "type": "str" } }, - "id": 
"reactflow__edge-ParseData-BUk4L{œdataTypeœ:œParseDataœ,œidœ:œParseData-BUk4Lœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-eDtym{œfieldNameœ:œcontextœ,œidœ:œPrompt-eDtymœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ParseData-BUk4L", - "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-BUk4Lœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-eDtym", - "targetHandle": "{œfieldNameœ: œcontextœ, œidœ: œPrompt-eDtymœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-ParseData-rM0er{œdataTypeœ:œParseDataœ,œidœ:œParseData-rM0erœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-0IrIN{œfieldNameœ:œcontextœ,œidœ:œPrompt-0IrINœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ParseData-rM0er", + "sourceHandle": "{œdataTypeœ: œParseDataœ, œidœ: œParseData-rM0erœ, œnameœ: œtextœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-0IrIN", + "targetHandle": "{œfieldNameœ: œcontextœ, œidœ: œPrompt-0IrINœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -35,7 +35,7 @@ "data": { "sourceHandle": { "dataType": "ChatInput", - "id": "ChatInput-wA18c", + "id": "ChatInput-Et6gu", "name": "message", "output_types": [ "Message" @@ -43,7 +43,7 @@ }, "targetHandle": { "fieldName": "question", - "id": "Prompt-eDtym", + "id": "Prompt-0IrIN", "inputTypes": [ "Message", "Text" @@ -51,11 +51,11 @@ "type": "str" } }, - "id": "reactflow__edge-ChatInput-wA18c{œdataTypeœ:œChatInputœ,œidœ:œChatInput-wA18cœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-eDtym{œfieldNameœ:œquestionœ,œidœ:œPrompt-eDtymœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", - "source": "ChatInput-wA18c", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-wA18cœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "Prompt-eDtym", - "targetHandle": "{œfieldNameœ: œquestionœ, œidœ: œPrompt-eDtymœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-ChatInput-Et6gu{œdataTypeœ:œChatInputœ,œidœ:œChatInput-Et6guœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt-0IrIN{œfieldNameœ:œquestionœ,œidœ:œPrompt-0IrINœ,œinputTypesœ:[œMessageœ,œTextœ],œtypeœ:œstrœ}", + "source": "ChatInput-Et6gu", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-Et6guœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "Prompt-0IrIN", + "targetHandle": "{œfieldNameœ: œquestionœ, œidœ: œPrompt-0IrINœ, œinputTypesœ: [œMessageœ, œTextœ], œtypeœ: œstrœ}" }, { "animated": false, @@ -63,7 +63,7 @@ "data": { "sourceHandle": { "dataType": "File", - "id": "File-qQ2Ve", + "id": "File-Iwn0a", "name": "data", "output_types": [ "Data" @@ -71,25 +71,25 @@ }, "targetHandle": { "fieldName": "data_inputs", - "id": "SplitText-y3ZXP", + "id": "SplitText-ZR67Y", "inputTypes": [ "Data" ], "type": "other" } }, - "id": "reactflow__edge-File-qQ2Ve{œdataTypeœ:œFileœ,œidœ:œFile-qQ2Veœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-y3ZXP{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-y3ZXPœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "File-qQ2Ve", - "sourceHandle": "{œdataTypeœ: œFileœ, œidœ: œFile-qQ2Veœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", - "target": "SplitText-y3ZXP", - "targetHandle": "{œfieldNameœ: œdata_inputsœ, œidœ: œSplitText-y3ZXPœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" + "id": "reactflow__edge-File-Iwn0a{œdataTypeœ:œFileœ,œidœ:œFile-Iwn0aœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-ZR67Y{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-ZR67Yœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": 
"File-Iwn0a", + "sourceHandle": "{œdataTypeœ: œFileœ, œidœ: œFile-Iwn0aœ, œnameœ: œdataœ, œoutput_typesœ: [œDataœ]}", + "target": "SplitText-ZR67Y", + "targetHandle": "{œfieldNameœ: œdata_inputsœ, œidœ: œSplitText-ZR67Yœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" }, { "className": "", "data": { "sourceHandle": { "dataType": "Prompt", - "id": "Prompt-eDtym", + "id": "Prompt-0IrIN", "name": "prompt", "output_types": [ "Message" @@ -97,25 +97,25 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "OpenAIModel-KLi32", + "id": "OpenAIModel-ivGZd", "inputTypes": [ "Message" ], "type": "str" } }, - "id": "reactflow__edge-Prompt-eDtym{œdataTypeœ:œPromptœ,œidœ:œPrompt-eDtymœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-KLi32{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-KLi32œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "Prompt-eDtym", - "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-eDtymœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", - "target": "OpenAIModel-KLi32", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-KLi32œ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-Prompt-0IrIN{œdataTypeœ:œPromptœ,œidœ:œPrompt-0IrINœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-ivGZd{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-ivGZdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "Prompt-0IrIN", + "sourceHandle": "{œdataTypeœ: œPromptœ, œidœ: œPrompt-0IrINœ, œnameœ: œpromptœ, œoutput_typesœ: [œMessageœ]}", + "target": "OpenAIModel-ivGZd", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œOpenAIModel-ivGZdœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { "className": "", "data": { "sourceHandle": { "dataType": "OpenAIModel", - "id": "OpenAIModel-KLi32", + "id": "OpenAIModel-ivGZd", "name": "text_output", "output_types": [ "Message" @@ -123,143 +123,143 @@ }, "targetHandle": { "fieldName": "input_value", - "id": "ChatOutput-0WLJD", + "id": "ChatOutput-jT0HE", "inputTypes": [ "Message" ], "type": "str" } }, - "id": "reactflow__edge-OpenAIModel-KLi32{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-KLi32œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-0WLJD{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-0WLJDœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "OpenAIModel-KLi32", - "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-KLi32œ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", - "target": "ChatOutput-0WLJD", - "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-0WLJDœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "id": "reactflow__edge-OpenAIModel-ivGZd{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-ivGZdœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-jT0HE{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-jT0HEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-ivGZd", + "sourceHandle": "{œdataTypeœ: œOpenAIModelœ, œidœ: œOpenAIModel-ivGZdœ, œnameœ: œtext_outputœ, œoutput_typesœ: [œMessageœ]}", + "target": "ChatOutput-jT0HE", + "targetHandle": "{œfieldNameœ: œinput_valueœ, œidœ: œChatOutput-jT0HEœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { "data": { "sourceHandle": { - "dataType": "SplitText", - "id": "SplitText-y3ZXP", - "name": "chunks", + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-gxtIK", + "name": "embeddings", "output_types": [ - "Data" + "Embeddings" ] }, "targetHandle": { - "fieldName": "ingest_data", - "id": "AstraDB-On9gj", + "fieldName": "embedding_model", + "id": "AstraDB-q0CNq", "inputTypes": [ - "Data" + "Embeddings" 
], "type": "other" } }, - "id": "xy-edge__SplitText-y3ZXP{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-y3ZXPœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}-AstraDB-On9gj{œfieldNameœ:œingest_dataœ,œidœ:œAstraDB-On9gjœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "SplitText-y3ZXP", - "sourceHandle": "{œdataTypeœ: œSplitTextœ, œidœ: œSplitText-y3ZXPœ, œnameœ: œchunksœ, œoutput_typesœ: [œDataœ]}", - "target": "AstraDB-On9gj", - "targetHandle": "{œfieldNameœ: œingest_dataœ, œidœ: œAstraDB-On9gjœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" + "id": "xy-edge__OpenAIEmbeddings-gxtIK{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-gxtIKœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-q0CNq{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-q0CNqœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "source": "OpenAIEmbeddings-gxtIK", + "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-gxtIKœ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", + "target": "AstraDB-q0CNq", + "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-q0CNqœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" }, { "data": { "sourceHandle": { - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-AjeYo", - "name": "embeddings", + "dataType": "ChatInput", + "id": "ChatInput-Et6gu", + "name": "message", "output_types": [ - "Embeddings" + "Message" ] }, "targetHandle": { - "fieldName": "embedding_model", - "id": "AstraDB-On9gj", + "fieldName": "search_query", + "id": "AstraDB-q0CNq", "inputTypes": [ - "Embeddings" + "Message" ], - "type": "other" + "type": "str" } }, - "id": "xy-edge__OpenAIEmbeddings-AjeYo{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-AjeYoœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-On9gj{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-On9gjœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "source": "OpenAIEmbeddings-AjeYo", - "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-AjeYoœ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", - "target": "AstraDB-On9gj", - "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-On9gjœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" + "id": "xy-edge__ChatInput-Et6gu{œdataTypeœ:œChatInputœ,œidœ:œChatInput-Et6guœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-AstraDB-q0CNq{œfieldNameœ:œsearch_queryœ,œidœ:œAstraDB-q0CNqœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "ChatInput-Et6gu", + "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-Et6guœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", + "target": "AstraDB-q0CNq", + "targetHandle": "{œfieldNameœ: œsearch_queryœ, œidœ: œAstraDB-q0CNqœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" }, { "data": { "sourceHandle": { - "dataType": "OpenAIEmbeddings", - "id": "OpenAIEmbeddings-0KAu8", - "name": "embeddings", + "dataType": "AstraDB", + "id": "AstraDB-q0CNq", + "name": "search_results", "output_types": [ - "Embeddings" + "Data" ] }, "targetHandle": { - "fieldName": "embedding_model", - "id": "AstraDB-OPirQ", + "fieldName": "data", + "id": "ParseData-rM0er", "inputTypes": [ - "Embeddings" + "Data" ], "type": "other" } }, - "id": "xy-edge__OpenAIEmbeddings-0KAu8{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-0KAu8œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-OPirQ{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-OPirQœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "source": "OpenAIEmbeddings-0KAu8", - "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-0KAu8œ, œnameœ: 
œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", - "target": "AstraDB-OPirQ", - "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-OPirQœ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" + "id": "xy-edge__AstraDB-q0CNq{œdataTypeœ:œAstraDBœ,œidœ:œAstraDB-q0CNqœ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}-ParseData-rM0er{œfieldNameœ:œdataœ,œidœ:œParseData-rM0erœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": "AstraDB-q0CNq", + "sourceHandle": "{œdataTypeœ: œAstraDBœ, œidœ: œAstraDB-q0CNqœ, œnameœ: œsearch_resultsœ, œoutput_typesœ: [œDataœ]}", + "target": "ParseData-rM0er", + "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-rM0erœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" }, { "data": { "sourceHandle": { - "dataType": "ChatInput", - "id": "ChatInput-wA18c", - "name": "message", + "dataType": "OpenAIEmbeddings", + "id": "OpenAIEmbeddings-XrIYd", + "name": "embeddings", "output_types": [ - "Message" + "Embeddings" ] }, "targetHandle": { - "fieldName": "search_query", - "id": "AstraDB-OPirQ", + "fieldName": "embedding_model", + "id": "AstraDB-DY2V0", "inputTypes": [ - "Message" + "Embeddings" ], - "type": "str" + "type": "other" } }, - "id": "xy-edge__ChatInput-wA18c{œdataTypeœ:œChatInputœ,œidœ:œChatInput-wA18cœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-AstraDB-OPirQ{œfieldNameœ:œsearch_queryœ,œidœ:œAstraDB-OPirQœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "source": "ChatInput-wA18c", - "sourceHandle": "{œdataTypeœ: œChatInputœ, œidœ: œChatInput-wA18cœ, œnameœ: œmessageœ, œoutput_typesœ: [œMessageœ]}", - "target": "AstraDB-OPirQ", - "targetHandle": "{œfieldNameœ: œsearch_queryœ, œidœ: œAstraDB-OPirQœ, œinputTypesœ: [œMessageœ], œtypeœ: œstrœ}" + "id": "xy-edge__OpenAIEmbeddings-XrIYd{œdataTypeœ:œOpenAIEmbeddingsœ,œidœ:œOpenAIEmbeddings-XrIYdœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-AstraDB-DY2V0{œfieldNameœ:œembedding_modelœ,œidœ:œAstraDB-DY2V0œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "source": "OpenAIEmbeddings-XrIYd", + "sourceHandle": "{œdataTypeœ: œOpenAIEmbeddingsœ, œidœ: œOpenAIEmbeddings-XrIYdœ, œnameœ: œembeddingsœ, œoutput_typesœ: [œEmbeddingsœ]}", + "target": "AstraDB-DY2V0", + "targetHandle": "{œfieldNameœ: œembedding_modelœ, œidœ: œAstraDB-DY2V0œ, œinputTypesœ: [œEmbeddingsœ], œtypeœ: œotherœ}" }, { "data": { "sourceHandle": { - "dataType": "AstraDB", - "id": "AstraDB-OPirQ", - "name": "search_results", + "dataType": "SplitText", + "id": "SplitText-ZR67Y", + "name": "chunks", "output_types": [ "Data" ] }, "targetHandle": { - "fieldName": "data", - "id": "ParseData-BUk4L", + "fieldName": "ingest_data", + "id": "AstraDB-DY2V0", "inputTypes": [ "Data" ], "type": "other" } }, - "id": "xy-edge__AstraDB-OPirQ{œdataTypeœ:œAstraDBœ,œidœ:œAstraDB-OPirQœ,œnameœ:œsearch_resultsœ,œoutput_typesœ:[œDataœ]}-ParseData-BUk4L{œfieldNameœ:œdataœ,œidœ:œParseData-BUk4Lœ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", - "source": "AstraDB-OPirQ", - "sourceHandle": "{œdataTypeœ: œAstraDBœ, œidœ: œAstraDB-OPirQœ, œnameœ: œsearch_resultsœ, œoutput_typesœ: [œDataœ]}", - "target": "ParseData-BUk4L", - "targetHandle": "{œfieldNameœ: œdataœ, œidœ: œParseData-BUk4Lœ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" + "id": "xy-edge__SplitText-ZR67Y{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-ZR67Yœ,œnameœ:œchunksœ,œoutput_typesœ:[œDataœ]}-AstraDB-DY2V0{œfieldNameœ:œingest_dataœ,œidœ:œAstraDB-DY2V0œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "source": "SplitText-ZR67Y", + "sourceHandle": "{œdataTypeœ: œSplitTextœ, œidœ: œSplitText-ZR67Yœ, œnameœ: œchunksœ, œoutput_typesœ: 
[œDataœ]}", + "target": "AstraDB-DY2V0", + "targetHandle": "{œfieldNameœ: œingest_dataœ, œidœ: œAstraDB-DY2V0œ, œinputTypesœ: [œDataœ], œtypeœ: œotherœ}" } ], "nodes": [ @@ -267,7 +267,7 @@ "data": { "description": "Get chat inputs from the Playground.", "display_name": "Chat Input", - "id": "ChatInput-wA18c", + "id": "ChatInput-Et6gu", "node": { "base_classes": [ "Message" @@ -530,7 +530,7 @@ }, "dragging": false, "height": 234, - "id": "ChatInput-wA18c", + "id": "ChatInput-Et6gu", "measured": { "height": 234, "width": 320 @@ -551,7 +551,7 @@ "data": { "description": "Convert Data into plain text following a specified template.", "display_name": "Parse Data", - "id": "ParseData-BUk4L", + "id": "ParseData-rM0er", "node": { "base_classes": [ "Message" @@ -683,7 +683,7 @@ }, "dragging": false, "height": 350, - "id": "ParseData-BUk4L", + "id": "ParseData-rM0er", "measured": { "height": 350, "width": 320 @@ -704,7 +704,7 @@ "data": { "description": "Create a prompt template with dynamic variables.", "display_name": "Prompt", - "id": "Prompt-eDtym", + "id": "Prompt-0IrIN", "node": { "base_classes": [ "Message" @@ -862,7 +862,7 @@ }, "dragging": false, "height": 433, - "id": "Prompt-eDtym", + "id": "Prompt-0IrIN", "measured": { "height": 433, "width": 320 @@ -883,7 +883,7 @@ "data": { "description": "Split text into chunks based on specified criteria.", "display_name": "Split Text", - "id": "SplitText-y3ZXP", + "id": "SplitText-ZR67Y", "node": { "base_classes": [ "Data" @@ -1028,7 +1028,7 @@ }, "dragging": false, "height": 475, - "id": "SplitText-y3ZXP", + "id": "SplitText-ZR67Y", "measured": { "height": 475, "width": 320 @@ -1047,7 +1047,7 @@ }, { "data": { - "id": "note-14EcH", + "id": "note-64kCP", "node": { "description": "## 🐕 2. Retriever Flow\n\nThis flow answers your questions with contextual data retrieved from your vector database.\n\nOpen the **Playground** and ask, \n\n```\nWhat is this document about?\n```\n", "display_name": "", @@ -1060,7 +1060,7 @@ }, "dragging": false, "height": 324, - "id": "note-14EcH", + "id": "note-64kCP", "measured": { "height": 324, "width": 325 @@ -1084,7 +1084,7 @@ }, { "data": { - "id": "note-EJLAH", + "id": "note-lehA3", "node": { "description": "## 📖 README\n\nLoad your data into a vector database with the 📚 **Load Data** flow, and then use your data as chat context with the 🐕 **Retriever** flow.\n\n**🚨 Add your OpenAI API key as a global variable to easily add it to all of the OpenAI components in this flow.** \n\n**Quick start**\n1. Run the 📚 **Load Data** flow.\n2. Run the 🐕 **Retriever** flow.\n\n**Next steps** \n\n- Experiment by changing the prompt and the loaded data to see how the bot's responses change. 
\n\nFor more info, see the [Langflow docs](https://docs.langflow.org/starter-projects-vector-store-rag).", "display_name": "Read Me", @@ -1097,7 +1097,7 @@ }, "dragging": false, "height": 527, - "id": "note-EJLAH", + "id": "note-lehA3", "measured": { "height": 527, "width": 325 @@ -1123,7 +1123,7 @@ "data": { "description": "Display a chat message in the Playground.", "display_name": "Chat Output", - "id": "ChatOutput-0WLJD", + "id": "ChatOutput-jT0HE", "node": { "base_classes": [ "Message" @@ -1384,7 +1384,7 @@ }, "dragging": false, "height": 234, - "id": "ChatOutput-0WLJD", + "id": "ChatOutput-jT0HE", "measured": { "height": 234, "width": 320 @@ -1403,7 +1403,7 @@ }, { "data": { - "id": "OpenAIEmbeddings-0KAu8", + "id": "OpenAIEmbeddings-gxtIK", "node": { "base_classes": [ "Embeddings" @@ -1882,7 +1882,7 @@ }, "dragging": false, "height": 320, - "id": "OpenAIEmbeddings-0KAu8", + "id": "OpenAIEmbeddings-gxtIK", "measured": { "height": 320, "width": 320 @@ -1901,7 +1901,7 @@ }, { "data": { - "id": "note-qqMHW", + "id": "note-m74Bv", "node": { "description": "## 📚 1. Load Data Flow\n\nRun this first! Load data from a local file and embed it into the vector database.\n\nSelect a Database and a Collection, or create new ones. \n\nClick ▶️ **Run component** on the **Astra DB** component to load your data.\n\n* If you're using OSS Langflow, add your Astra DB Application Token to the Astra DB component.\n\n#### Next steps:\n Experiment by changing the prompt and the contextual data to see how the retrieval flow's responses change.", "display_name": "", @@ -1914,7 +1914,7 @@ }, "dragging": false, "height": 50, - "id": "note-qqMHW", + "id": "note-m74Bv", "measured": { "height": 50, "width": 325 @@ -1937,7 +1937,7 @@ }, { "data": { - "id": "OpenAIEmbeddings-AjeYo", + "id": "OpenAIEmbeddings-XrIYd", "node": { "base_classes": [ "Embeddings" @@ -2416,7 +2416,7 @@ }, "dragging": false, "height": 320, - "id": "OpenAIEmbeddings-AjeYo", + "id": "OpenAIEmbeddings-XrIYd", "measured": { "height": 320, "width": 320 @@ -2435,7 +2435,7 @@ }, { "data": { - "id": "File-qQ2Ve", + "id": "File-Iwn0a", "node": { "base_classes": [ "Data" @@ -2660,7 +2660,7 @@ }, "dragging": false, "height": 367, - "id": "File-qQ2Ve", + "id": "File-Iwn0a", "measured": { "height": 367, "width": 320 @@ -2679,7 +2679,7 @@ }, { "data": { - "id": "note-CS14d", + "id": "note-ge6ir", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2692,7 +2692,7 @@ }, "dragging": false, "height": 324, - "id": "note-CS14d", + "id": "note-ge6ir", "measured": { "height": 324, "width": 324 @@ -2711,7 +2711,7 @@ }, { "data": { - "id": "note-zIBHR", + "id": "note-7hmyZ", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2724,7 +2724,7 @@ }, "dragging": false, "height": 324, - "id": "note-zIBHR", + "id": "note-7hmyZ", "measured": { "height": 324, "width": 324 @@ -2743,7 +2743,7 @@ }, { "data": { - "id": "note-pWkb3", + "id": "note-JN3nw", "node": { "description": "### 💡 Add your OpenAI API key here 👇", "display_name": "", @@ -2756,7 +2756,7 @@ }, "dragging": false, "height": 324, - "id": "note-pWkb3", + "id": "note-JN3nw", "measured": { "height": 324, "width": 324 @@ -2775,7 +2775,7 @@ }, { "data": { - "id": "OpenAIModel-KLi32", + "id": "OpenAIModel-ivGZd", "node": { "base_classes": [ "LanguageModel", @@ -3105,7 +3105,7 @@ "type": "OpenAIModel" }, "dragging": false, - "id": "OpenAIModel-KLi32", + "id": "OpenAIModel-ivGZd", "measured": { "height": 656, "width": 320 @@ -3119,7 +3119,7 @@ 
}, { "data": { - "id": "AstraDB-OPirQ", + "id": "AstraDB-q0CNq", "node": { "base_classes": [ "Data", @@ -3268,7 +3268,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import os\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new database in Astra DB.\",\n \"display_name\": \"Create New Database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"New Database Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud Provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new collection in Astra DB.\",\n \"display_name\": \"Create New Collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"New Collection Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding Generation Provider\",\n info=\"Provider to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding Generation Model\",\n info=\"Model to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n 
display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"api_endpoint\",\n display_name=\"Database\",\n info=\"The Database / API Endpoint for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n # dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def create_database_api(\n cls,\n token: str,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n ):\n client = DataAPIClient(token=token)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return admin_client.create_database(\n name=new_database_name,\n cloud_provider=cloud_provider,\n region=region,\n )\n\n @classmethod\n def create_collection_api(\n cls,\n token: str,\n database_name: str,\n new_collection_name: str,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n client = DataAPIClient(token=token)\n api_endpoint = cls.get_api_endpoint_static(token=token, database_name=database_name)\n\n # Get the database object\n database = client.get_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=embedding_generation_provider,\n model_name=embedding_generation_model,\n authentication=None,\n parameters=None,\n )\n\n # Create the collection\n return database.create_collection(\n name=new_collection_name,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra.datastax.com\"\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n 
).list_collection_names(keyspace=db.info.keyspace)\n )\n ),\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n database_name: str | None = None,\n ):\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Otherwise, get the URL from the database list\n return cls.get_database_list_static(token=token, environment=environment).get(database_name).get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n database_name=self.api_endpoint,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting database: {e}\")\n\n return None\n\n def collection_exists(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n return self.collection_name in list(database.list_collection_names(keyspace=self.get_keyspace()))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting collection status: {e}\")\n\n return False\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def get_vectorize_providers(self):\n try:\n self.log(\"Dynamically updating list of Vectorize providers.\")\n\n # Get the admin object\n admin = AstraDBAdmin(token=self.token)\n db_admin = admin.get_database_admin(api_endpoint=self.get_api_endpoint())\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # TODO: https://astra.datastax.com/api/v2/graphql\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching Vectorize providers: {e}\")\n\n return {}\n\n def _initialize_database_options(self):\n try:\n return [\n {\"name\": name, \"collections\": 
info[\"collections\"]} for name, info in self.get_database_list().items()\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching databases: {e}\")\n\n return []\n\n def _initialize_collection_options(self):\n database = self.get_database_object()\n if database is None:\n return []\n\n try:\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider\n if col.options.vector and col.options.vector.service\n else None\n ),\n \"icon\": \"\",\n \"model\": (\n col.options.vector.service.model_name\n if col.options.vector and col.options.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching collections: {e}\")\n\n return []\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Define variables for common database conditions a user may experience\n is_hosted = os.getenv(\"LANGFLOW_HOST\") is not None\n no_databases = \"options\" not in build_config[\"api_endpoint\"] or not build_config[\"api_endpoint\"][\"options\"]\n\n # Refresh the database name options\n if not is_hosted and (field_name in [\"token\", \"environment\"] or no_databases):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # Reset the collection values selected\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Scenario #1: We have database options from the provided token\n if database_options:\n # Reset the selected database\n build_config[\"api_endpoint\"][\"name\"] = \"Database\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Database\"\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"api_endpoint\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"api_endpoint\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n # Scenario #2: We have no options from the provided token\n else:\n # Fallback to an API Endpoint if we couldn't retrieve options\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"name\"] = \"API Endpoint\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Astra DB API Endpoint\"\n\n # If we didn't retrieve options based on the token, show the text input\n if \"options\" in build_config[\"api_endpoint\"]:\n del build_config[\"api_endpoint\"][\"options\"]\n\n # Get list of regions for a given cloud provider\n \"\"\"\n cloud_provider = (\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"cloud_provider\"][\n \"value\"\n ]\n or \"Amazon Web Services\"\n )\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n \"\"\"\n\n # Define variables for common collection conditions a user may experience\n no_collections = not build_config[\"collection_name\"][\"options\"]\n\n # Refresh the collection name options\n if field_name == \"api_endpoint\" or (field_name == \"collection_name\" and no_collections):\n # Reset the selected 
collection\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Reload the list of collections and metadata associated\n collection_options = self._initialize_collection_options()\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Define variables for common collection choice conditions a user may experience\n collection_chosen = field_value and build_config[\"collection_name\"][\"options\"]\n\n # Hide embedding model option if opriona_metadata provider is not null\n if field_name == \"collection_name\" and collection_chosen:\n # Find location of the name in the options list\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n # For the final step, get the list of vectorize providers\n \"\"\"\n vectorize_providers = self.get_vectorize_providers()\n if not vectorize_providers:\n return build_config\n\n # Allow the user to see the embedding provider options\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n if not provider_options:\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # And allow the user to see the models based on a selected provider\n model_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"]\n if not model_options:\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n \"\"\"\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n if os.getenv(\"LANGFLOW_HOST\") is not None:\n langflow_prefix = \"ds-\"\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": self.collection_exists(), # TODO: May want to expose this option\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params and self.collection_data(collection_name=self.collection_name) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=self.get_api_endpoint(),\n namespace=self.get_keyspace(),\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=self.get_keyspace())\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n 
}\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "import os\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new database in Astra DB.\",\n \"display_name\": \"Create New Database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"New Database Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud Provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: 
str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new collection in Astra DB.\",\n \"display_name\": \"Create New Collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"New Collection Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding Generation Provider\",\n info=\"Provider to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding Generation Model\",\n info=\"Model to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"api_endpoint\",\n display_name=\"Database\",\n info=\"The Database / API Endpoint for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n # dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def create_database_api(\n cls,\n token: str,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n ):\n client = DataAPIClient(token=token)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return admin_client.create_database(\n name=new_database_name,\n cloud_provider=cloud_provider,\n region=region,\n )\n\n @classmethod\n def create_collection_api(\n cls,\n token: str,\n database_name: str,\n new_collection_name: str,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n client = DataAPIClient(token=token)\n api_endpoint = cls.get_api_endpoint_static(token=token, database_name=database_name)\n\n # Get the database object\n database = client.get_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=embedding_generation_provider,\n model_name=embedding_generation_model,\n authentication=None,\n parameters=None,\n )\n\n # Create the collection\n return database.create_collection(\n name=new_collection_name,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra.datastax.com\"\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n 
).list_collection_names(keyspace=db.info.keyspace)\n )\n ),\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n database_name: str | None = None,\n ):\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Otherwise, get the URL from the database list\n return cls.get_database_list_static(token=token, environment=environment).get(database_name).get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n database_name=self.api_endpoint,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting database: {e}\")\n\n return None\n\n def collection_exists(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n return self.collection_name in list(database.list_collection_names(keyspace=self.get_keyspace()))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting collection status: {e}\")\n\n return False\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def get_vectorize_providers(self):\n try:\n self.log(\"Dynamically updating list of Vectorize providers.\")\n\n # Get the admin object\n admin = AstraDBAdmin(token=self.token)\n db_admin = admin.get_database_admin(api_endpoint=self.get_api_endpoint())\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # TODO: https://astra.datastax.com/api/v2/graphql\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching Vectorize providers: {e}\")\n\n return {}\n\n def _initialize_database_options(self):\n try:\n return [\n {\"name\": name, \"collections\": 
info[\"collections\"]} for name, info in self.get_database_list().items()\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching databases: {e}\")\n\n return []\n\n def _initialize_collection_options(self):\n database = self.get_database_object()\n if database is None:\n return []\n\n try:\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider\n if col.options.vector and col.options.vector.service\n else None\n ),\n \"icon\": \"\",\n \"model\": (\n col.options.vector.service.model_name\n if col.options.vector and col.options.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching collections: {e}\")\n\n return []\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Define variables for common database conditions a user may experience\n is_hosted = os.getenv(\"LANGFLOW_HOST\") is not None\n no_databases = \"options\" not in build_config[\"api_endpoint\"] or not build_config[\"api_endpoint\"][\"options\"]\n no_api_endpoint = not build_config[\"api_endpoint\"][\"value\"]\n\n # Refresh the database name options\n if not is_hosted and (field_name in [\"token\", \"environment\"] or (no_databases and no_api_endpoint)):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # Reset the collection values selected\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Scenario #1: We have database options from the provided token\n if database_options:\n # Reset the selected database\n build_config[\"api_endpoint\"][\"name\"] = \"Database\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Database\"\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"api_endpoint\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"api_endpoint\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n # Scenario #2: We have no options from the provided token\n else:\n # Fallback to an API Endpoint if we couldn't retrieve options\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"name\"] = \"API Endpoint\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Astra DB API Endpoint\"\n\n # If we didn't retrieve options based on the token, show the text input\n if \"options\" in build_config[\"api_endpoint\"]:\n del build_config[\"api_endpoint\"][\"options\"]\n\n # Get list of regions for a given cloud provider\n \"\"\"\n cloud_provider = (\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"cloud_provider\"][\n \"value\"\n ]\n or \"Amazon Web Services\"\n )\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n \"\"\"\n\n # Refresh the collection name options\n if field_name == \"api_endpoint\":\n # Reset the selected collection\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Reload the list of collections and metadata associated\n 
collection_options = self._initialize_collection_options()\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Hide embedding model option if options_metadata provider is not null\n if field_name == \"collection_name\" and field_value:\n # Add the field value to the collection name options if it's a new collection\n if not is_hosted and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\"records\": 0, \"provider\": None, \"icon\": \"\", \"model\": None}\n )\n\n # Find location of the name in the options list\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n # For the final step, get the list of vectorize providers\n \"\"\"\n vectorize_providers = self.get_vectorize_providers()\n if not vectorize_providers:\n return build_config\n\n # Allow the user to see the embedding provider options\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n if not provider_options:\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # And allow the user to see the models based on a selected provider\n model_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"]\n if not model_options:\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n \"\"\"\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n if os.getenv(\"LANGFLOW_HOST\") is not None:\n langflow_prefix = \"ds-\"\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": self.collection_exists(), # TODO: May want to expose this option\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params and self.collection_data(collection_name=self.collection_name) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=self.get_api_endpoint(),\n namespace=self.get_keyspace(),\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=self.get_keyspace())\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n 
}\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_name": { "_input_type": "DropdownInput", @@ -3561,21 +3561,21 @@ "type": "AstraDB" }, "dragging": false, - "id": "AstraDB-OPirQ", + "id": "AstraDB-q0CNq", "measured": { "height": 614, "width": 320 }, "position": { - "x": 1214.160633250919, - "y": 580.5342255634779 + "x": 1221.7808624943825, + "y": 598.7224891255499 }, "selected": false, "type": "genericNode" }, { "data": { - "id": "AstraDB-On9gj", + "id": "AstraDB-DY2V0", "node": { "base_classes": [ "Data", @@ -3724,7 +3724,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import os\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new database in Astra DB.\",\n \"display_name\": \"Create New Database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"New Database Name\",\n info=\"Name of the 
new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud Provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new collection in Astra DB.\",\n \"display_name\": \"Create New Collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"New Collection Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding Generation Provider\",\n info=\"Provider to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding Generation Model\",\n info=\"Model to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"api_endpoint\",\n display_name=\"Database\",\n info=\"The Database / API Endpoint for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n # dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. 
Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. \"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def create_database_api(\n cls,\n token: str,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n ):\n client = DataAPIClient(token=token)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return admin_client.create_database(\n name=new_database_name,\n cloud_provider=cloud_provider,\n region=region,\n )\n\n @classmethod\n def create_collection_api(\n cls,\n token: str,\n database_name: str,\n new_collection_name: str,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n client = DataAPIClient(token=token)\n api_endpoint = cls.get_api_endpoint_static(token=token, database_name=database_name)\n\n # Get the database object\n database = client.get_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=embedding_generation_provider,\n model_name=embedding_generation_model,\n authentication=None,\n parameters=None,\n )\n\n # Create the collection\n return database.create_collection(\n name=new_collection_name,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def 
get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra.datastax.com\"\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n ).list_collection_names(keyspace=db.info.keyspace)\n )\n ),\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n database_name: str | None = None,\n ):\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Otherwise, get the URL from the database list\n return cls.get_database_list_static(token=token, environment=environment).get(database_name).get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n database_name=self.api_endpoint,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting database: {e}\")\n\n return None\n\n def collection_exists(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n return self.collection_name in list(database.list_collection_names(keyspace=self.get_keyspace()))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting collection status: {e}\")\n\n return False\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def get_vectorize_providers(self):\n try:\n self.log(\"Dynamically updating list of Vectorize providers.\")\n\n # Get the admin object\n admin = AstraDBAdmin(token=self.token)\n db_admin = admin.get_database_admin(api_endpoint=self.get_api_endpoint())\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider 
key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # TODO: https://astra.datastax.com/api/v2/graphql\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching Vectorize providers: {e}\")\n\n return {}\n\n def _initialize_database_options(self):\n try:\n return [\n {\"name\": name, \"collections\": info[\"collections\"]} for name, info in self.get_database_list().items()\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching databases: {e}\")\n\n return []\n\n def _initialize_collection_options(self):\n database = self.get_database_object()\n if database is None:\n return []\n\n try:\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider\n if col.options.vector and col.options.vector.service\n else None\n ),\n \"icon\": \"\",\n \"model\": (\n col.options.vector.service.model_name\n if col.options.vector and col.options.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching collections: {e}\")\n\n return []\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Define variables for common database conditions a user may experience\n is_hosted = os.getenv(\"LANGFLOW_HOST\") is not None\n no_databases = \"options\" not in build_config[\"api_endpoint\"] or not build_config[\"api_endpoint\"][\"options\"]\n\n # Refresh the database name options\n if not is_hosted and (field_name in [\"token\", \"environment\"] or no_databases):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # Reset the collection values selected\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Scenario #1: We have database options from the provided token\n if database_options:\n # Reset the selected database\n build_config[\"api_endpoint\"][\"name\"] = \"Database\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Database\"\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"api_endpoint\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"api_endpoint\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n # Scenario #2: We have no options from the provided token\n else:\n # Fallback to an API Endpoint if we couldn't retrieve options\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"name\"] = \"API Endpoint\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Astra DB API Endpoint\"\n\n # If we didn't retrieve options based on the token, show the text input\n if \"options\" in build_config[\"api_endpoint\"]:\n del build_config[\"api_endpoint\"][\"options\"]\n\n # Get list of regions for a given cloud provider\n \"\"\"\n cloud_provider = (\n 
build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"cloud_provider\"][\n \"value\"\n ]\n or \"Amazon Web Services\"\n )\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n \"\"\"\n\n # Define variables for common collection conditions a user may experience\n no_collections = not build_config[\"collection_name\"][\"options\"]\n\n # Refresh the collection name options\n if field_name == \"api_endpoint\" or (field_name == \"collection_name\" and no_collections):\n # Reset the selected collection\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Reload the list of collections and metadata associated\n collection_options = self._initialize_collection_options()\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Define variables for common collection choice conditions a user may experience\n collection_chosen = field_value and build_config[\"collection_name\"][\"options\"]\n\n # Hide embedding model option if opriona_metadata provider is not null\n if field_name == \"collection_name\" and collection_chosen:\n # Find location of the name in the options list\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n # For the final step, get the list of vectorize providers\n \"\"\"\n vectorize_providers = self.get_vectorize_providers()\n if not vectorize_providers:\n return build_config\n\n # Allow the user to see the embedding provider options\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n if not provider_options:\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # And allow the user to see the models based on a selected provider\n model_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"]\n if not model_options:\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n \"\"\"\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb 
import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. \"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n if os.getenv(\"LANGFLOW_HOST\") is not None:\n langflow_prefix = \"ds-\"\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": self.collection_exists(), # TODO: May want to expose this option\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params and self.collection_data(collection_name=self.collection_name) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=self.get_api_endpoint(),\n namespace=self.get_keyspace(),\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=self.get_keyspace())\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n 
\"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n }\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" + "value": "import os\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\n\nfrom astrapy import AstraDBAdmin, DataAPIClient, Database\nfrom langchain_astradb import AstraDBVectorStore, CollectionVectorServiceOptions\n\nfrom langflow.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom langflow.helpers import docs_to_data\nfrom langflow.inputs import FloatInput, NestedDictInput\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n SecretStrInput,\n StrInput,\n)\nfrom langflow.schema import Data\nfrom langflow.utils.version import get_version_info\n\n\nclass AstraDBVectorStoreComponent(LCVectorStoreComponent):\n display_name: str = \"Astra DB\"\n description: str = \"Ingest and search documents in Astra DB\"\n documentation: str = \"https://docs.datastax.com/en/langflow/astra-components.html\"\n name = \"AstraDB\"\n icon: str = \"AstraDB\"\n\n _cached_vector_store: AstraDBVectorStore | None = None\n\n @dataclass\n class NewDatabaseInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new database in Astra DB.\",\n \"display_name\": \"Create New Database\",\n \"field_order\": [\"new_database_name\", \"cloud_provider\", \"region\"],\n \"template\": {\n \"new_database_name\": StrInput(\n name=\"new_database_name\",\n display_name=\"New Database Name\",\n info=\"Name of the new database to create in Astra DB.\",\n required=True,\n ),\n \"cloud_provider\": DropdownInput(\n name=\"cloud_provider\",\n display_name=\"Cloud Provider\",\n info=\"Cloud provider for the new database.\",\n options=[\"Amazon Web Services\", \"Google Cloud Platform\", \"Microsoft Azure\"],\n required=True,\n ),\n \"region\": DropdownInput(\n name=\"region\",\n display_name=\"Region\",\n info=\"Region for the new 
database.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n @dataclass\n class NewCollectionInput:\n functionality: str = \"create\"\n fields: dict[str, dict] = field(\n default_factory=lambda: {\n \"data\": {\n \"node\": {\n \"description\": \"Create a new collection in Astra DB.\",\n \"display_name\": \"Create New Collection\",\n \"field_order\": [\n \"new_collection_name\",\n \"embedding_generation_provider\",\n \"embedding_generation_model\",\n ],\n \"template\": {\n \"new_collection_name\": StrInput(\n name=\"new_collection_name\",\n display_name=\"New Collection Name\",\n info=\"Name of the new collection to create in Astra DB.\",\n required=True,\n ),\n \"embedding_generation_provider\": DropdownInput(\n name=\"embedding_generation_provider\",\n display_name=\"Embedding Generation Provider\",\n info=\"Provider to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n \"embedding_generation_model\": DropdownInput(\n name=\"embedding_generation_model\",\n display_name=\"Embedding Generation Model\",\n info=\"Model to use for generating embeddings.\",\n options=[],\n required=True,\n ),\n },\n },\n }\n }\n )\n\n inputs = [\n SecretStrInput(\n name=\"token\",\n display_name=\"Astra DB Application Token\",\n info=\"Authentication token for accessing Astra DB.\",\n value=\"ASTRA_DB_APPLICATION_TOKEN\",\n required=True,\n real_time_refresh=True,\n input_types=[],\n ),\n StrInput(\n name=\"environment\",\n display_name=\"Environment\",\n info=\"The environment for the Astra DB API Endpoint.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"api_endpoint\",\n display_name=\"Database\",\n info=\"The Database / API Endpoint for the Astra DB instance.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n combobox=True,\n ),\n DropdownInput(\n name=\"collection_name\",\n display_name=\"Collection\",\n info=\"The name of the collection within Astra DB where the vectors will be stored.\",\n required=True,\n refresh_button=True,\n real_time_refresh=True,\n # dialog_inputs=asdict(NewCollectionInput()),\n combobox=True,\n ),\n StrInput(\n name=\"keyspace\",\n display_name=\"Keyspace\",\n info=\"Optional keyspace within Astra DB to use for the collection.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"embedding_choice\",\n display_name=\"Embedding Model or Astra Vectorize\",\n info=\"Choose an embedding model or use Astra Vectorize.\",\n options=[\"Embedding Model\", \"Astra Vectorize\"],\n value=\"Embedding Model\",\n advanced=True,\n real_time_refresh=True,\n ),\n HandleInput(\n name=\"embedding_model\",\n display_name=\"Embedding Model\",\n input_types=[\"Embeddings\"],\n info=\"Specify the Embedding Model. Not required for Astra Vectorize collections.\",\n required=False,\n ),\n *LCVectorStoreComponent.inputs,\n IntInput(\n name=\"number_of_results\",\n display_name=\"Number of Search Results\",\n info=\"Number of search results to return.\",\n advanced=True,\n value=4,\n ),\n DropdownInput(\n name=\"search_type\",\n display_name=\"Search Type\",\n info=\"Search type to use\",\n options=[\"Similarity\", \"Similarity with score threshold\", \"MMR (Max Marginal Relevance)\"],\n value=\"Similarity\",\n advanced=True,\n ),\n FloatInput(\n name=\"search_score_threshold\",\n display_name=\"Search Score Threshold\",\n info=\"Minimum similarity score threshold for search results. 
\"\n \"(when using 'Similarity with score threshold')\",\n value=0,\n advanced=True,\n ),\n NestedDictInput(\n name=\"advanced_search_filter\",\n display_name=\"Search Metadata Filter\",\n info=\"Optional dictionary of filters to apply to the search query.\",\n advanced=True,\n ),\n StrInput(\n name=\"content_field\",\n display_name=\"Content Field\",\n info=\"Field to use as the text content field for the vector store.\",\n advanced=True,\n ),\n StrInput(\n name=\"deletion_field\",\n display_name=\"Deletion Based On Field\",\n info=\"When this parameter is provided, documents in the target collection with \"\n \"metadata field values matching the input metadata field value will be deleted \"\n \"before new data is loaded.\",\n advanced=True,\n ),\n BoolInput(\n name=\"ignore_invalid_documents\",\n display_name=\"Ignore Invalid Documents\",\n info=\"Boolean flag to determine whether to ignore invalid documents at runtime.\",\n advanced=True,\n ),\n NestedDictInput(\n name=\"astradb_vectorstore_kwargs\",\n display_name=\"AstraDBVectorStore Parameters\",\n info=\"Optional dictionary of additional parameters for the AstraDBVectorStore.\",\n advanced=True,\n ),\n ]\n\n @classmethod\n def map_cloud_providers(cls):\n return {\n \"Amazon Web Services\": {\n \"id\": \"aws\",\n \"regions\": [\"us-east-2\", \"ap-south-1\", \"eu-west-1\"],\n },\n \"Google Cloud Platform\": {\n \"id\": \"gcp\",\n \"regions\": [\"us-east1\"],\n },\n \"Microsoft Azure\": {\n \"id\": \"azure\",\n \"regions\": [\"westus3\"],\n },\n }\n\n @classmethod\n def create_database_api(\n cls,\n token: str,\n new_database_name: str,\n cloud_provider: str,\n region: str,\n ):\n client = DataAPIClient(token=token)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Call the create database function\n return admin_client.create_database(\n name=new_database_name,\n cloud_provider=cloud_provider,\n region=region,\n )\n\n @classmethod\n def create_collection_api(\n cls,\n token: str,\n database_name: str,\n new_collection_name: str,\n dimension: int | None = None,\n embedding_generation_provider: str | None = None,\n embedding_generation_model: str | None = None,\n ):\n client = DataAPIClient(token=token)\n api_endpoint = cls.get_api_endpoint_static(token=token, database_name=database_name)\n\n # Get the database object\n database = client.get_database(api_endpoint=api_endpoint, token=token)\n\n # Build vectorize options, if needed\n vectorize_options = None\n if not dimension:\n vectorize_options = CollectionVectorServiceOptions(\n provider=embedding_generation_provider,\n model_name=embedding_generation_model,\n authentication=None,\n parameters=None,\n )\n\n # Create the collection\n return database.create_collection(\n name=new_collection_name,\n dimension=dimension,\n service=vectorize_options,\n )\n\n @classmethod\n def get_database_list_static(cls, token: str, environment: str | None = None):\n client = DataAPIClient(token=token, environment=environment)\n\n # Get the admin object\n admin_client = client.get_admin(token=token)\n\n # Get the list of databases\n db_list = list(admin_client.list_databases())\n\n # Generate the api endpoint for each database\n db_info_dict = {}\n for db in db_list:\n try:\n api_endpoint = f\"https://{db.info.id}-{db.info.region}.apps.astra.datastax.com\"\n db_info_dict[db.info.name] = {\n \"api_endpoint\": api_endpoint,\n \"collections\": len(\n list(\n client.get_database(\n api_endpoint=api_endpoint, token=token, keyspace=db.info.keyspace\n 
).list_collection_names(keyspace=db.info.keyspace)\n )\n ),\n }\n except Exception: # noqa: BLE001, S110\n pass\n\n return db_info_dict\n\n def get_database_list(self):\n return self.get_database_list_static(token=self.token, environment=self.environment)\n\n @classmethod\n def get_api_endpoint_static(\n cls,\n token: str,\n environment: str | None = None,\n database_name: str | None = None,\n ):\n # Check if the database_name is like a url\n if database_name and database_name.startswith(\"https://\"):\n return database_name\n\n # If the database is not set, nothing we can do.\n if not database_name:\n return None\n\n # Otherwise, get the URL from the database list\n return cls.get_database_list_static(token=token, environment=environment).get(database_name).get(\"api_endpoint\")\n\n def get_api_endpoint(self):\n return self.get_api_endpoint_static(\n token=self.token,\n environment=self.environment,\n database_name=self.api_endpoint,\n )\n\n def get_keyspace(self):\n keyspace = self.keyspace\n\n if keyspace:\n return keyspace.strip()\n\n return None\n\n def get_database_object(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n return client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting database: {e}\")\n\n return None\n\n def collection_exists(self):\n try:\n client = DataAPIClient(token=self.token, environment=self.environment)\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n return self.collection_name in list(database.list_collection_names(keyspace=self.get_keyspace()))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error getting collection status: {e}\")\n\n return False\n\n def collection_data(self, collection_name: str, database: Database | None = None):\n try:\n if not database:\n client = DataAPIClient(token=self.token, environment=self.environment)\n\n database = client.get_database(\n api_endpoint=self.get_api_endpoint(),\n token=self.token,\n keyspace=self.get_keyspace(),\n )\n\n collection = database.get_collection(collection_name, keyspace=self.get_keyspace())\n\n return collection.estimated_document_count()\n except Exception as e: # noqa: BLE001\n self.log(f\"Error checking collection data: {e}\")\n\n return None\n\n def get_vectorize_providers(self):\n try:\n self.log(\"Dynamically updating list of Vectorize providers.\")\n\n # Get the admin object\n admin = AstraDBAdmin(token=self.token)\n db_admin = admin.get_database_admin(api_endpoint=self.get_api_endpoint())\n\n # Get the list of embedding providers\n embedding_providers = db_admin.find_embedding_providers().as_dict()\n\n vectorize_providers_mapping = {}\n # Map the provider display name to the provider key and models\n for provider_key, provider_data in embedding_providers[\"embeddingProviders\"].items():\n display_name = provider_data[\"displayName\"]\n models = [model[\"name\"] for model in provider_data[\"models\"]]\n\n # TODO: https://astra.datastax.com/api/v2/graphql\n vectorize_providers_mapping[display_name] = [provider_key, models]\n\n # Sort the resulting dictionary\n return defaultdict(list, dict(sorted(vectorize_providers_mapping.items())))\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching Vectorize providers: {e}\")\n\n return {}\n\n def _initialize_database_options(self):\n try:\n return [\n {\"name\": name, \"collections\": 
info[\"collections\"]} for name, info in self.get_database_list().items()\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching databases: {e}\")\n\n return []\n\n def _initialize_collection_options(self):\n database = self.get_database_object()\n if database is None:\n return []\n\n try:\n collection_list = list(database.list_collections(keyspace=self.get_keyspace()))\n\n return [\n {\n \"name\": col.name,\n \"records\": self.collection_data(collection_name=col.name, database=database),\n \"provider\": (\n col.options.vector.service.provider\n if col.options.vector and col.options.vector.service\n else None\n ),\n \"icon\": \"\",\n \"model\": (\n col.options.vector.service.model_name\n if col.options.vector and col.options.vector.service\n else None\n ),\n }\n for col in collection_list\n ]\n except Exception as e: # noqa: BLE001\n self.log(f\"Error fetching collections: {e}\")\n\n return []\n\n def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None):\n # Define variables for common database conditions a user may experience\n is_hosted = os.getenv(\"LANGFLOW_HOST\") is not None\n no_databases = \"options\" not in build_config[\"api_endpoint\"] or not build_config[\"api_endpoint\"][\"options\"]\n no_api_endpoint = not build_config[\"api_endpoint\"][\"value\"]\n\n # Refresh the database name options\n if not is_hosted and (field_name in [\"token\", \"environment\"] or (no_databases and no_api_endpoint)):\n # Get the list of options we have based on the token provided\n database_options = self._initialize_database_options()\n\n # Reset the collection values selected\n build_config[\"collection_name\"][\"options\"] = []\n build_config[\"collection_name\"][\"options_metadata\"] = []\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Scenario #1: We have database options from the provided token\n if database_options:\n # Reset the selected database\n build_config[\"api_endpoint\"][\"name\"] = \"Database\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Database\"\n\n # If we retrieved options based on the token, show the dropdown\n build_config[\"api_endpoint\"][\"options\"] = [db[\"name\"] for db in database_options]\n build_config[\"api_endpoint\"][\"options_metadata\"] = [\n {k: v for k, v in db.items() if k not in [\"name\"]} for db in database_options\n ]\n # Scenario #2: We have no options from the provided token\n else:\n # Fallback to an API Endpoint if we couldn't retrieve options\n build_config[\"api_endpoint\"][\"value\"] = \"\"\n build_config[\"api_endpoint\"][\"name\"] = \"API Endpoint\"\n build_config[\"api_endpoint\"][\"display_name\"] = \"Astra DB API Endpoint\"\n\n # If we didn't retrieve options based on the token, show the text input\n if \"options\" in build_config[\"api_endpoint\"]:\n del build_config[\"api_endpoint\"][\"options\"]\n\n # Get list of regions for a given cloud provider\n \"\"\"\n cloud_provider = (\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"cloud_provider\"][\n \"value\"\n ]\n or \"Amazon Web Services\"\n )\n build_config[\"api_endpoint\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\"region\"][\n \"options\"\n ] = self.map_cloud_providers()[cloud_provider][\"regions\"]\n \"\"\"\n\n # Refresh the collection name options\n if field_name == \"api_endpoint\":\n # Reset the selected collection\n build_config[\"collection_name\"][\"value\"] = \"\"\n\n # Reload the list of collections and metadata associated\n 
collection_options = self._initialize_collection_options()\n build_config[\"collection_name\"][\"options\"] = [col[\"name\"] for col in collection_options]\n build_config[\"collection_name\"][\"options_metadata\"] = [\n {k: v for k, v in col.items() if k not in [\"name\"]} for col in collection_options\n ]\n\n # Hide embedding model option if options_metadata provider is not null\n if field_name == \"collection_name\" and field_value:\n # Add the field value to the collection name options if it's a new collection\n if not is_hosted and field_value not in build_config[\"collection_name\"][\"options\"]:\n build_config[\"collection_name\"][\"options\"].append(field_value)\n build_config[\"collection_name\"][\"options_metadata\"].append(\n {\"records\": 0, \"provider\": None, \"icon\": \"\", \"model\": None}\n )\n\n # Find location of the name in the options list\n index_of_name = build_config[\"collection_name\"][\"options\"].index(field_value)\n value_of_provider = build_config[\"collection_name\"][\"options_metadata\"][index_of_name][\"provider\"]\n\n # If we were able to determine the Vectorize provider, set it accordingly\n if value_of_provider:\n build_config[\"embedding_model\"][\"advanced\"] = True\n build_config[\"embedding_choice\"][\"value\"] = \"Astra Vectorize\"\n else:\n build_config[\"embedding_model\"][\"advanced\"] = False\n build_config[\"embedding_choice\"][\"value\"] = \"Embedding Model\"\n\n # For the final step, get the list of vectorize providers\n \"\"\"\n vectorize_providers = self.get_vectorize_providers()\n if not vectorize_providers:\n return build_config\n\n # Allow the user to see the embedding provider options\n provider_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"]\n if not provider_options:\n # If the collection is set, allow user to see embedding options\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"options\"] = [\"Bring your own\", \"Nvidia\", *[key for key in vectorize_providers if key != \"Nvidia\"]]\n\n # And allow the user to see the models based on a selected provider\n model_options = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"]\n if not model_options:\n embedding_provider = build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_provider\"\n ][\"value\"]\n\n build_config[\"collection_name\"][\"dialog_inputs\"][\"fields\"][\"data\"][\"node\"][\"template\"][\n \"embedding_generation_model\"\n ][\"options\"] = vectorize_providers.get(embedding_provider, [[], []])[1]\n \"\"\"\n\n return build_config\n\n @check_cached_vector_store\n def build_vector_store(self):\n try:\n from langchain_astradb import AstraDBVectorStore\n except ImportError as e:\n msg = (\n \"Could not import langchain Astra DB integration package. 
\"\n \"Please install it with `pip install langchain-astradb`.\"\n )\n raise ImportError(msg) from e\n\n # Get the embedding model and additional params\n embedding_params = (\n {\"embedding\": self.embedding_model}\n if self.embedding_model and self.embedding_choice == \"Embedding Model\"\n else {}\n )\n\n additional_params = self.astradb_vectorstore_kwargs or {}\n\n # Get Langflow version and platform information\n __version__ = get_version_info()[\"version\"]\n langflow_prefix = \"\"\n if os.getenv(\"LANGFLOW_HOST\") is not None:\n langflow_prefix = \"ds-\"\n\n # Bundle up the auto-detect parameters\n autodetect_params = {\n \"autodetect_collection\": self.collection_exists(), # TODO: May want to expose this option\n \"content_field\": (\n self.content_field\n if self.content_field and embedding_params\n else (\n \"page_content\"\n if embedding_params and self.collection_data(collection_name=self.collection_name) == 0\n else None\n )\n ),\n \"ignore_invalid_documents\": self.ignore_invalid_documents,\n }\n\n # Attempt to build the Vector Store object\n try:\n vector_store = AstraDBVectorStore(\n # Astra DB Authentication Parameters\n token=self.token,\n api_endpoint=self.get_api_endpoint(),\n namespace=self.get_keyspace(),\n collection_name=self.collection_name,\n environment=self.environment,\n # Astra DB Usage Tracking Parameters\n ext_callers=[(f\"{langflow_prefix}langflow\", __version__)],\n # Astra DB Vector Store Parameters\n **autodetect_params,\n **embedding_params,\n **additional_params,\n )\n except Exception as e:\n msg = f\"Error initializing AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self._add_documents_to_vector_store(vector_store)\n\n return vector_store\n\n def _add_documents_to_vector_store(self, vector_store) -> None:\n documents = []\n for _input in self.ingest_data or []:\n if isinstance(_input, Data):\n documents.append(_input.to_lc_document())\n else:\n msg = \"Vector Store Inputs must be Data objects.\"\n raise TypeError(msg)\n\n if documents and self.deletion_field:\n self.log(f\"Deleting documents where {self.deletion_field}\")\n try:\n database = self.get_database_object()\n collection = database.get_collection(self.collection_name, keyspace=self.get_keyspace())\n delete_values = list({doc.metadata[self.deletion_field] for doc in documents})\n self.log(f\"Deleting documents where {self.deletion_field} matches {delete_values}.\")\n collection.delete_many({f\"metadata.{self.deletion_field}\": {\"$in\": delete_values}})\n except Exception as e:\n msg = f\"Error deleting documents from AstraDBVectorStore based on '{self.deletion_field}': {e}\"\n raise ValueError(msg) from e\n\n if documents:\n self.log(f\"Adding {len(documents)} documents to the Vector Store.\")\n try:\n vector_store.add_documents(documents)\n except Exception as e:\n msg = f\"Error adding documents to AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n else:\n self.log(\"No documents to add to the Vector Store.\")\n\n def _map_search_type(self) -> str:\n search_type_mapping = {\n \"Similarity with score threshold\": \"similarity_score_threshold\",\n \"MMR (Max Marginal Relevance)\": \"mmr\",\n }\n\n return search_type_mapping.get(self.search_type, \"similarity\")\n\n def _build_search_args(self):\n query = self.search_query if isinstance(self.search_query, str) and self.search_query.strip() else None\n\n if query:\n args = {\n \"query\": query,\n \"search_type\": self._map_search_type(),\n \"k\": self.number_of_results,\n \"score_threshold\": self.search_score_threshold,\n 
}\n elif self.advanced_search_filter:\n args = {\n \"n\": self.number_of_results,\n }\n else:\n return {}\n\n filter_arg = self.advanced_search_filter or {}\n if filter_arg:\n args[\"filter\"] = filter_arg\n\n return args\n\n def search_documents(self, vector_store=None) -> list[Data]:\n vector_store = vector_store or self.build_vector_store()\n\n self.log(f\"Search input: {self.search_query}\")\n self.log(f\"Search type: {self.search_type}\")\n self.log(f\"Number of results: {self.number_of_results}\")\n\n try:\n search_args = self._build_search_args()\n except Exception as e:\n msg = f\"Error in AstraDBVectorStore._build_search_args: {e}\"\n raise ValueError(msg) from e\n\n if not search_args:\n self.log(\"No search input or filters provided. Skipping search.\")\n return []\n\n docs = []\n search_method = \"search\" if \"query\" in search_args else \"metadata_search\"\n\n try:\n self.log(f\"Calling vector_store.{search_method} with args: {search_args}\")\n docs = getattr(vector_store, search_method)(**search_args)\n except Exception as e:\n msg = f\"Error performing {search_method} in AstraDBVectorStore: {e}\"\n raise ValueError(msg) from e\n\n self.log(f\"Retrieved documents: {len(docs)}\")\n\n data = docs_to_data(docs)\n self.log(f\"Converted documents to data: {len(data)}\")\n self.status = data\n\n return data\n\n def get_retriever_kwargs(self):\n search_args = self._build_search_args()\n\n return {\n \"search_type\": self._map_search_type(),\n \"search_kwargs\": search_args,\n }\n" }, "collection_name": { "_input_type": "DropdownInput", @@ -4017,28 +4017,28 @@ "type": "AstraDB" }, "dragging": false, - "id": "AstraDB-On9gj", + "id": "AstraDB-DY2V0", "measured": { "height": 614, "width": 320 }, "position": { - "x": 2053.5494369250237, - "y": 1464.5807736028041 + "x": 2053.8028711939423, + "y": 1455.7952184640951 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 23.946958998386435, - "y": -163.93184624766263, - "zoom": 0.44406917240373706 + "x": 42.69696210801385, + "y": -162.1714705321857, + "zoom": 0.43217536168398524 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, - "id": "20de5d98-7fe3-4530-ac01-6377766f26aa", + "id": "b57f5ec7-f4f1-42d9-b877-4b2a4fb0650a", "is_component": false, "last_tested_version": "1.1.2", "name": "Vector Store RAG",
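Reviewer note: the collection-name branch added to update_build_config is the behavioral core of this change, so below is a minimal, self-contained sketch of it for verification outside Langflow. Everything here is illustrative, not the component's actual API: update_collection_options is a hypothetical stand-in for the method, and the build_config dict models only the keys this hunk touches.

# Hypothetical sketch of the new typed-in-collection handling (assumed shapes).
def update_collection_options(build_config: dict, field_value: str, *, is_hosted: bool = False) -> dict:
    options = build_config["collection_name"]["options"]
    metadata = build_config["collection_name"]["options_metadata"]

    # New in this diff: a name the user typed that is not already in the
    # dropdown is registered as a new collection with placeholder metadata.
    if not is_hosted and field_value not in options:
        options.append(field_value)
        metadata.append({"records": 0, "provider": None, "icon": "", "model": None})

    # Carried over from the old code: toggle the embedding fields based on the
    # (possibly placeholder) Vectorize provider of the selected collection.
    provider = metadata[options.index(field_value)]["provider"]
    build_config["embedding_model"]["advanced"] = bool(provider)
    build_config["embedding_choice"]["value"] = "Astra Vectorize" if provider else "Embedding Model"
    return build_config

# A brand-new name now resolves instead of raising ValueError from .index():
config = {
    "collection_name": {
        "options": ["existing"],
        "options_metadata": [{"records": 3, "provider": "nvidia", "icon": "", "model": "NV-Embed-QA"}],
    },
    "embedding_model": {"advanced": True},
    "embedding_choice": {"value": "Astra Vectorize"},
}
update_collection_options(config, "brand_new")
assert config["embedding_choice"]["value"] == "Embedding Model"  # placeholder provider is None

This also shows why the old no_collections/collection_chosen guards could be dropped: since a typed-in value is always appended before the .index(field_value) lookup, the lookup can no longer fail on a new collection name, which appears to be the failure mode this hunk addresses.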