diff --git a/README.md b/README.md
index b89ec0ef..03c2aa96 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,57 @@
@@ -95,67 +146,6 @@ Please give us a **FREE STAR 🌟** if you find it helpful 😇 --- -## Quickstart with Docker - -A simple way to initiate self-hosted TaskingAI community edition is through [Docker](https://www.docker.com/). - -### Prerequisites - -- Docker and Docker Compose installed on your machine. -- Git installed for cloning the repository. -- Python environment (above Python 3.8) for running the client SDK. - -### Installation - -First, clone the TaskingAI (community edition) repository from GitHub. - -```bash -git clone https://github.com/taskingai/taskingai.git -cd taskingai -``` - -Inside the cloned repository, go to the docker directory. - -```bash -cd docker -``` - -1. **Copy `.env.example` to `.env`**: - - ```sh - cp .env.example .env - ``` - -2. **Edit the `.env` file**: - Open the `.env` file in your favorite text editor and update the necessary configurations. Ensure all required environment variables are set correctly. - -3. **Start Docker Compose**: - Run the following command to start all services: - ```sh - docker-compose -p taskingai --env-file .env up -d - ``` - -Once the service is up, access the TaskingAI console through your browser with the URL http://localhost:8080. The default username and password are `admin` and `TaskingAI321`. - -### Upgrade - -If you have already installed TaskingAI with a previous version and want to upgrade to the latest version, first update the repository. - -```bash -git pull origin master -``` - -Then stop the current docker service, upgrade to the latest version by pulling the latest image, and finally restart the service. - -```bash -cd docker -docker-compose -p taskingai down -docker-compose -p taskingai pull -docker-compose -p taskingai --env-file .env up -d -``` - -Don't worry about data loss; your data will be automatically migrated to the latest version schema if needed. 
- ### TaskingAI UI Console [](https://youtu.be/4A5uQoawETU) diff --git a/backend/Dockerfile b/backend/Dockerfile index adcf511e..510cc315 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -34,4 +34,4 @@ CMD gunicorn --bind 0.0.0.0:8000 \ --error-logfile - \ --worker-connections 200 \ -k uvicorn.workers.UvicornWorker \ - app.fastapi_app:app + app.fastapi_app:app \ No newline at end of file diff --git a/backend/tests/common/config.py b/backend/tests/common/config.py index 119d8c70..639ae756 100644 --- a/backend/tests/common/config.py +++ b/backend/tests/common/config.py @@ -127,7 +127,7 @@ class Config: "model_schema_id": "custom_host/openai-text-embedding", "credentials": { "CUSTOM_HOST_API_KEY": OPENAI_API_KEY, - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-ada-002", }, "properties": {"embedding_size": 1536, "input_token_limit": 8192, "max_batch_size": 2048}, @@ -138,7 +138,7 @@ class Config: "model_schema_id": "custom_host/openai-function-call", "credentials": { "CUSTOM_HOST_API_KEY": OPENAI_API_KEY, - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/chat/completions", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/chat/completions", "CUSTOM_HOST_MODEL_ID": "gpt-3.5-turbo", }, "properties": { diff --git a/backend/tests/services_tests/model/test_model.py b/backend/tests/services_tests/model/test_model.py index 3500bb6a..39555458 100644 --- a/backend/tests/services_tests/model/test_model.py +++ b/backend/tests/services_tests/model/test_model.py @@ -96,7 +96,7 @@ class TestModel: "model_schema_id": "custom_host/openai-text-embedding", "credentials": { "CUSTOM_HOST_API_KEY": CONFIG.OPENAI_API_KEY, - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-ada-002", }, "properties": 
{"embedding_size": 1536, "input_token_limit": 8192, "max_batch_size": 2048}, @@ -107,7 +107,7 @@ class TestModel: "model_schema_id": "custom_host/openai-function-call", "credentials": { "CUSTOM_HOST_API_KEY": CONFIG.OPENAI_API_KEY, - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/chat/completions", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/chat/completions", "CUSTOM_HOST_MODEL_ID": "gpt-3.5-turbo", }, "properties": { diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index d57c2783..d5b66ba6 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -2,13 +2,13 @@ version: "3.3" services: frontend: - image: taskingai/taskingai-console:v0.3.0 + image: taskingai/taskingai-console:v0.3.0 depends_on: - backend-web - backend-api backend-inference: - image: taskingai/taskingai-inference:v0.2.14 + image: taskingai/taskingai-inference:v0.2.14 environment: AES_ENCRYPTION_KEY: ${AES_ENCRYPTION_KEY} ICON_URL_PREFIX: ${HOST_URL} @@ -20,7 +20,7 @@ services: retries: 5 backend-plugin: - image: taskingai/taskingai-plugin:v0.2.10 + image: taskingai/taskingai-plugin:v0.2.10 environment: AES_ENCRYPTION_KEY: ${AES_ENCRYPTION_KEY} ICON_URL_PREFIX: ${HOST_URL} @@ -42,7 +42,7 @@ services: retries: 5 backend-api: - image: taskingai/taskingai-server:v0.3.0 + image: taskingai/taskingai-server:v0.3.0 environment: POSTGRES_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} REDIS_URL: redis://:${REDIS_PASSWORD}@cache:6379/${REDIS_DB} @@ -72,7 +72,7 @@ services: retries: 5 backend-web: - image: taskingai/taskingai-server:v0.3.0 + image: taskingai/taskingai-server:v0.3.0 environment: POSTGRES_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} REDIS_URL: redis://:${REDIS_PASSWORD}@cache:6379/${REDIS_DB} diff --git a/image-1.png b/image-1.png new file mode 100644 index 00000000..f4dcc646 Binary files /dev/null and b/image-1.png differ diff 
--git a/image.png b/image.png new file mode 100644 index 00000000..3df37ab9 Binary files /dev/null and b/image.png differ diff --git a/inference/providers/openai/chat_completion.py b/inference/providers/openai/chat_completion.py index 0dc8f353..0081bd71 100644 --- a/inference/providers/openai/chat_completion.py +++ b/inference/providers/openai/chat_completion.py @@ -116,7 +116,7 @@ def prepare_request( functions: Optional[List[ChatCompletionFunction]] = None, ) -> Tuple[str, Dict, Dict]: # todo accept user's api_url - api_url = "https://api.openai.com/v1/chat/completions" + api_url = "https://api.openai.com/v1/chat/completions" headers = build_openai_header(credentials) payload = _build_openai_chat_completion_payload( messages, stream, provider_model_id, configs, function_call, functions diff --git a/inference/providers/openai/resources/models/gpt-3.5-turbo-16k.yml b/inference/providers/openai/resources/models/gpt-3.5-turbo-16k.yml index e116e47e..bf14d795 100644 --- a/inference/providers/openai/resources/models/gpt-3.5-turbo-16k.yml +++ b/inference/providers/openai/resources/models/gpt-3.5-turbo-16k.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-3.5-turbo-16k type: chat_completion name: "i18n:gpt_3_5_turbo_16k_name" description: "i18n:gpt_3_5_turbo_16k_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: https://api.openai.com/v1/chat/completions properties: function_call: true diff --git a/inference/providers/openai/resources/models/gpt-3.5-turbo.yml b/inference/providers/openai/resources/models/gpt-3.5-turbo.yml index c4d79948..d5b8a129 100644 --- a/inference/providers/openai/resources/models/gpt-3.5-turbo.yml +++ b/inference/providers/openai/resources/models/gpt-3.5-turbo.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-3.5-turbo type: chat_completion name: "i18n:gpt_3_5_turbo_name" description: "i18n:gpt_3_5_turbo_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: 
https://api.openai.com/v1/chat/completions properties: function_call: true diff --git a/inference/providers/openai/resources/models/gpt-4-turbo.yml b/inference/providers/openai/resources/models/gpt-4-turbo.yml index 53732ebe..a90795f8 100644 --- a/inference/providers/openai/resources/models/gpt-4-turbo.yml +++ b/inference/providers/openai/resources/models/gpt-4-turbo.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-4-turbo type: chat_completion name: "i18n:gpt_4_turbo_name" description: "i18n:gpt_4_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: https://api.openai.com/v1/chat/completions properties: function_call: true diff --git a/inference/providers/openai/resources/models/gpt-4-vision.yml b/inference/providers/openai/resources/models/gpt-4-vision.yml index 428bb8b1..21eb7a9b 100644 --- a/inference/providers/openai/resources/models/gpt-4-vision.yml +++ b/inference/providers/openai/resources/models/gpt-4-vision.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-4-vision-preview type: chat_completion name: "i18n:gpt_4_vision_name" description: "i18n:gpt_4_vision_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: https://api.openai.com/v1/chat/completions properties: vision: true diff --git a/inference/providers/openai/resources/models/gpt-4.yml b/inference/providers/openai/resources/models/gpt-4.yml index cd6fe40c..def6227e 100644 --- a/inference/providers/openai/resources/models/gpt-4.yml +++ b/inference/providers/openai/resources/models/gpt-4.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-4 type: chat_completion name: "i18n:gpt_4_name" description: "i18n:gpt_4_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: https://api.openai.com/v1/chat/completions properties: function_call: true diff --git a/inference/providers/openai/resources/models/gpt-4o.yml b/inference/providers/openai/resources/models/gpt-4o.yml index 02d6faed..b54c5f0d 
100644 --- a/inference/providers/openai/resources/models/gpt-4o.yml +++ b/inference/providers/openai/resources/models/gpt-4o.yml @@ -3,7 +3,7 @@ provider_model_id: gpt-4o type: chat_completion name: "i18n:gpt_4o_name" description: "i18n:gpt_4o_description" -default_endpoint_url: https://api.openai.com/v1/chat/completions +default_endpoint_url: https://api.openai.com/v1/chat/completions properties: vision: true diff --git a/inference/providers/openai/resources/models/text-embedding-3-large-1024.yml b/inference/providers/openai/resources/models/text-embedding-3-large-1024.yml index 3079e99c..bc9e9f7d 100644 --- a/inference/providers/openai/resources/models/text-embedding-3-large-1024.yml +++ b/inference/providers/openai/resources/models/text-embedding-3-large-1024.yml @@ -3,7 +3,7 @@ provider_model_id: text-embedding-3-large-1024 type: text_embedding name: "i18n:text_embedding_3_large_1024_name" description: "i18n:text_embedding_3_large_1024_description" -default_endpoint_url: https://api.openai.com/v1/embeddings +default_endpoint_url: https://api.openai.com/v1/embeddings properties: embedding_size: 1024 diff --git a/inference/providers/openai/resources/models/text-embedding-3-large-256.yml b/inference/providers/openai/resources/models/text-embedding-3-large-256.yml index cfc60067..d75a4289 100644 --- a/inference/providers/openai/resources/models/text-embedding-3-large-256.yml +++ b/inference/providers/openai/resources/models/text-embedding-3-large-256.yml @@ -3,7 +3,7 @@ provider_model_id: text-embedding-3-large-256 type: text_embedding name: "i18n:text_embedding_3_large_256_name" description: "i18n:text_embedding_3_large_256_description" -default_endpoint_url: https://api.openai.com/v1/embeddings +default_endpoint_url: https://api.openai.com/v1/embeddings properties: embedding_size: 256 diff --git a/inference/providers/openai/resources/models/text-embedding-3-small-1536.yml b/inference/providers/openai/resources/models/text-embedding-3-small-1536.yml index 
f4f61e54..dd4252e3 100644 --- a/inference/providers/openai/resources/models/text-embedding-3-small-1536.yml +++ b/inference/providers/openai/resources/models/text-embedding-3-small-1536.yml @@ -3,7 +3,7 @@ provider_model_id: text-embedding-3-small-1536 type: text_embedding name: "i18n:text_embedding_3_small_1536_name" description: "i18n:text_embedding_3_small_1536_description" -default_endpoint_url: https://api.openai.com/v1/embeddings +default_endpoint_url: https://api.openai.com/v1/embeddings properties: embedding_size: 1536 diff --git a/inference/providers/openai/resources/models/text-embedding-3-small-512.yml b/inference/providers/openai/resources/models/text-embedding-3-small-512.yml index 8e2e847d..07b90cb8 100644 --- a/inference/providers/openai/resources/models/text-embedding-3-small-512.yml +++ b/inference/providers/openai/resources/models/text-embedding-3-small-512.yml @@ -3,7 +3,7 @@ provider_model_id: text-embedding-3-small-512 type: text_embedding name: "i18n:text_embedding_3_small_512_name" description: "i18n:text_embedding_3_small_512_description" -default_endpoint_url: https://api.openai.com/v1/embeddings +default_endpoint_url: https://api.openai.com/v1/embeddings properties: embedding_size: 512 diff --git a/inference/providers/openai/resources/models/text-embedding-ada-002.yml b/inference/providers/openai/resources/models/text-embedding-ada-002.yml index 7de96139..994d37e1 100644 --- a/inference/providers/openai/resources/models/text-embedding-ada-002.yml +++ b/inference/providers/openai/resources/models/text-embedding-ada-002.yml @@ -3,7 +3,7 @@ provider_model_id: text-embedding-ada-002 type: text_embedding name: "i18n:text_embedding_ada_002_name" description: "i18n:text_embedding_ada_002_description" -default_endpoint_url: https://api.openai.com/v1/embeddings +default_endpoint_url: https://api.openai.com/v1/embeddings properties: embedding_size: 1536 diff --git a/inference/providers/openai/text_embedding.py 
b/inference/providers/openai/text_embedding.py index 784abe0e..84f8dd4b 100644 --- a/inference/providers/openai/text_embedding.py +++ b/inference/providers/openai/text_embedding.py @@ -13,7 +13,7 @@ async def embed_text( proxy: Optional[str] = None, custom_headers: Optional[Dict[str, str]] = None, ) -> TextEmbeddingResult: - api_url = "https://api.openai.com/v1/embeddings" + api_url = "https://api.openai.com/v1/embeddings" headers = { "Authorization": f"Bearer {credentials.OPENAI_API_KEY}", diff --git a/inference/test/test_text_embedding.py b/inference/test/test_text_embedding.py index e1db1205..65cc09f4 100644 --- a/inference/test/test_text_embedding.py +++ b/inference/test/test_text_embedding.py @@ -32,7 +32,7 @@ async def test_text_embedding_single_text(self, test_data): data.update( { "credentials": { - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-3-small", "CUSTOM_HOST_API_KEY": Config.CUSTOM_HOST_API_KEY, } @@ -104,7 +104,7 @@ async def test_text_embedding_list_text(self, test_data): request_data.update( { "credentials": { - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-3-small", "CUSTOM_HOST_API_KEY": Config.CUSTOM_HOST_API_KEY, } @@ -176,7 +176,7 @@ async def test_text_embedding_empty_list_text(self, test_data): request_data.update( { "credentials": { - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-3-small", "CUSTOM_HOST_API_KEY": Config.CUSTOM_HOST_API_KEY, } diff --git a/inference/test/test_validation.py b/inference/test/test_validation.py index c3556b68..098e6096 100644 --- a/inference/test/test_validation.py +++ b/inference/test/test_validation.py @@ -27,7 +27,7 @@ async 
def test_validation(self, test_data): "model_schema_id": "custom_host/openai-text-embedding", "model_type": "text_embedding", "credentials": { - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-3-small", "CUSTOM_HOST_API_KEY": Config.CUSTOM_HOST_API_KEY, }, @@ -132,7 +132,7 @@ async def test_custom_host_validation_with_error_credential(self, test_data): "model_schema_id": "custom_host/openai-text-embedding", "model_type": "text_embedding", "credentials": { - "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", + "CUSTOM_HOST_ENDPOINT_URL": "https://api.openai.com/v1/embeddings", "CUSTOM_HOST_MODEL_ID": "text-embedding-3-small", "CUSTOM_HOST_API_KEY": "12345678", }, diff --git a/plugin/bundles/dalle_3/bundle.py b/plugin/bundles/dalle_3/bundle.py index 575a522c..f9227a4a 100644 --- a/plugin/bundles/dalle_3/bundle.py +++ b/plugin/bundles/dalle_3/bundle.py @@ -8,7 +8,7 @@ class Dalle3(BundleHandler): async def verify(self, credentials: BundleCredentials): openai_api_key: str = credentials.credentials.get("OPENAI_API_KEY") - url = "https://api.openai.com/v1/models" + url = "https://api.openai.com/v1/models" headers = { "Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json" } diff --git a/plugin/bundles/dalle_3/plugins/generate_image/plugin.py b/plugin/bundles/dalle_3/plugins/generate_image/plugin.py index 39fa2522..8172921d 100644 --- a/plugin/bundles/dalle_3/plugins/generate_image/plugin.py +++ b/plugin/bundles/dalle_3/plugins/generate_image/plugin.py @@ -18,7 +18,7 @@ async def execute(self, credentials: BundleCredentials, plugin_input: PluginInpu if not project_id: raise_http_error(ErrorCode.REQUEST_VALIDATION_ERROR, "project_id is required") - url = "https://api.openai.com/v1/images/generations" + url = "https://api.openai.com/v1/images/generations" headers = {"Authorization": f"Bearer {openai_api_key}", 
"Content-Type": "application/json"} data = {"prompt": prompt, "model": "dall-e-3", "n": 1} diff --git a/plugin/bundles/gpt_vision_models/bundle.py b/plugin/bundles/gpt_vision_models/bundle.py index 6f3cb7d3..2a556426 100644 --- a/plugin/bundles/gpt_vision_models/bundle.py +++ b/plugin/bundles/gpt_vision_models/bundle.py @@ -8,7 +8,7 @@ class GptVisionModels(BundleHandler): async def verify(self, credentials: BundleCredentials): OPENAI_API_KEY: str = credentials.credentials.get("OPENAI_API_KEY") - url = "https://api.openai.com/v1/models" + url = "https://api.openai.com/v1/models" headers = {"Authorization": f"Bearer {OPENAI_API_KEY}", "Content-Type": "application/json"} async with ClientSession() as session: diff --git a/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_o/plugin.py b/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_o/plugin.py index c891f5f6..c822482d 100644 --- a/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_o/plugin.py +++ b/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_o/plugin.py @@ -45,7 +45,7 @@ async def execute(self, credentials: BundleCredentials, plugin_input: PluginInpu async with ClientSession() as session: async with session.post( - url="https://api.openai.com/v1/chat/completions", headers=headers, json=data, proxy=CONFIG.PROXY + url="https://api.openai.com/v1/chat/completions", headers=headers, json=data, proxy=CONFIG.PROXY ) as response: if response.status != 200: raise_provider_api_error(await response.text()) diff --git a/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_turbo/plugin.py b/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_turbo/plugin.py index 6994166c..179e71e9 100644 --- a/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_turbo/plugin.py +++ b/plugin/bundles/gpt_vision_models/plugins/chat_completion_by_gpt4_turbo/plugin.py @@ -43,7 +43,7 @@ async def execute(self, credentials: BundleCredentials, 
plugin_input: PluginInpu async with ClientSession() as session: async with session.post( - url="https://api.openai.com/v1/chat/completions", headers=headers, json=data, proxy=CONFIG.PROXY + url="https://api.openai.com/v1/chat/completions", headers=headers, json=data, proxy=CONFIG.PROXY ) as response: if response.status != 200: raise_provider_api_error(await response.text())