This documentation describes the integration of MindsDB with LangChain, a framework for developing applications powered by language models.
The integration allows for the deployment of LangChain models within MindsDB, providing the models with access to data from various data sources.
-- Create the LangChain ML engine inside MindsDB.
-- serper_api_key is optional: if provided, the model uses serper.dev search
-- to enhance its output.
CREATE ML_ENGINE langchain_engine
FROM langchain
USING
    serper_api_key = 'your-serper-api-key';
Create a model using langchain_engine as an engine and a selected model provider.
Copy
Ask AI
-- Create a model on top of langchain_engine with a chosen model provider.
CREATE MODEL langchain_model
PREDICT target_column
USING
    engine = 'langchain_engine',          -- engine name as created via CREATE ML_ENGINE
    <provider>_api_key = 'api-key-value', -- replace <provider> with one of: openai, anthropic, anyscale, google, litellm
    model_name = 'model-name',            -- optional, model to be used (for example, 'gpt-4' if 'openai_api_key' provided)
    prompt_template = 'message to the model that may include some {{input}} columns as variables',
    max_tokens = 4096;                    -- defines the maximum number of tokens
There are three different tools utilized by this agent:
MindsDB is the internal MindsDB executor.
Metadata fetches the metadata information for the available tables.
Write is able to write agent responses into a MindsDB data source.
Each tool exposes the internal MindsDB executor in a different way to perform its tasks, effectively enabling the agent model to read from (and potentially write to) data sources or models available in the active MindsDB project.
Create a conversational model using langchain_engine as an engine and a selected model provider.
OpenAI
Copy
Ask AI
-- Conversational model backed by OpenAI via the LangChain engine.
CREATE MODEL langchain_openai_model
PREDICT answer
USING
    engine = 'langchain_engine',       -- engine name as created via CREATE ML_ENGINE
    provider = 'openai',               -- one of the available providers
    openai_api_key = 'api-key-value',
    model_name = 'gpt-3.5-turbo',      -- choose one of the available OpenAI models
    mode = 'conversational',           -- conversational mode
    user_column = 'question',          -- column name that stores input from the user
    assistant_column = 'answer',       -- column name that stores output of the model (see PREDICT column)
    base_url = 'base-url-value',       -- optional, default is https://api.openai.com/v1/
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
Anthropic
Copy
Ask AI
-- Conversational model backed by Anthropic via the LangChain engine.
CREATE MODEL langchain_anthropic_model
PREDICT answer
USING
    engine = 'langchain_engine',          -- engine name as created via CREATE ML_ENGINE
    provider = 'anthropic',               -- one of the available providers
    anthropic_api_key = 'api-key-value',
    model_name = 'claude-2.1',            -- choose one of the available Anthropic models
    mode = 'conversational',              -- conversational mode
    user_column = 'question',             -- column name that stores input from the user
    assistant_column = 'answer',          -- column name that stores output of the model (see PREDICT column)
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
Anyscale
Copy
Ask AI
-- Conversational model backed by Anyscale via the LangChain engine.
CREATE MODEL langchain_anyscale_model
PREDICT answer
USING
    engine = 'langchain_engine',                     -- engine name as created via CREATE ML_ENGINE
    provider = 'anyscale',                           -- one of the available providers
    anyscale_api_key = 'api-key-value',
    model_name = 'mistralai/Mistral-7B-Instruct-v0.1', -- choose one of the models available from Anyscale
    mode = 'conversational',                         -- conversational mode
    user_column = 'question',                        -- column name that stores input from the user
    assistant_column = 'answer',                     -- column name that stores output of the model (see PREDICT column)
    base_url = 'https://api.endpoints.anyscale.com/v1',
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
Ollama
Copy
Ask AI
-- Conversational model backed by a locally-served Ollama model via the LangChain engine.
-- Note: no API key parameter — Ollama serves models locally.
CREATE MODEL langchain_ollama_model
PREDICT answer
USING
    engine = 'langchain_engine',  -- engine name as created via CREATE ML_ENGINE
    provider = 'ollama',          -- one of the available providers
    model_name = 'llama2',        -- choose one of the models available from Ollama
    mode = 'conversational',      -- conversational mode
    user_column = 'question',     -- column name that stores input from the user
    assistant_column = 'answer',  -- column name that stores output of the model (see PREDICT column)
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
-- LiteLLM
-- Conversational model backed by LiteLLM via the LangChain engine.
CREATE MODEL langchain_litellm_model
PREDICT answer
USING
    engine = 'langchain_engine',        -- engine name as created via CREATE ML_ENGINE
    provider = 'litellm',               -- one of the available providers
    litellm_api_key = 'api-key-value',
    model_name = 'gpt-4',               -- choose one of the models available from LiteLLM
    mode = 'conversational',            -- conversational mode
    user_column = 'question',           -- column name that stores input from the user
    assistant_column = 'answer',        -- column name that stores output of the model (see PREDICT column)
    base_url = 'https://ai.dev.mindsdb.com',
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
Google
Copy
Ask AI
-- Conversational model backed by Google via the LangChain engine.
CREATE MODEL langchain_google_model
PREDICT answer
USING
    engine = 'langchain_engine',       -- engine name as created via CREATE ML_ENGINE
    provider = 'google',               -- one of the available providers
    google_api_key = 'api-key-value',
    model_name = 'gemini-1.5-flash',   -- choose one of the models available from Google
    mode = 'conversational',           -- conversational mode
    user_column = 'question',          -- column name that stores input from the user
    assistant_column = 'answer',       -- column name that stores output of the model (see PREDICT column)
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;
MindsDB
Copy
Ask AI
-- Conversational model that wraps another model already created within MindsDB.
CREATE MODEL langchain_mindsdb_model
PREDICT answer
USING
    engine = 'langchain_engine',  -- engine name as created via CREATE ML_ENGINE
    provider = 'mindsdb',         -- one of the available providers
    model_name = 'mindsdb_model', -- any model created within MindsDB
    mode = 'conversational',      -- conversational mode
    user_column = 'question',     -- column name that stores input from the user
    assistant_column = 'answer',  -- column name that stores output of the model (see PREDICT column)
    verbose = True,
    prompt_template = 'Answer the users input in a helpful way: {{question}}',
    max_tokens = 4096;