Skip to content

Commit

Permalink
Release 0.1.16 (#19)
Browse files Browse the repository at this point in the history
* Fix bug in external dependency configuration (#16)

* Add examples for common configurations (#17)

* Update llamacloud deployment commands (#18)
  • Loading branch information
epicchewy authored Sep 24, 2024
1 parent 5f9b785 commit 80a09c6
Show file tree
Hide file tree
Showing 21 changed files with 478 additions and 48 deletions.
9 changes: 9 additions & 0 deletions .github/workflows/helm_lint_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,15 @@ jobs:
--validate-maintainers=false \
--check-version-increment=false
- name: Setup helm unittest
id: setup-helm-unittest
run: helm plugin install https://github.com/helm-unittest/helm-unittest

- name: Run Unit Test
id: unit-test-charts
run: |
helm unittest ./charts/llamacloud
install-test:
name: "Install Helm Charts"

Expand Down
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,5 @@
.idea/

.DS_STORE

.debug/
2 changes: 2 additions & 0 deletions charts/llamacloud/.helmignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,5 @@
.idea/
*.tmproj
.vscode/

tests/
4 changes: 2 additions & 2 deletions charts/llamacloud/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,5 +45,5 @@ keywords:
- llamacloud
- rag

version: 0.1.15
appVersion: "0.1.15"
version: 0.1.16
appVersion: "0.1.16"
29 changes: 14 additions & 15 deletions charts/llamacloud/README.md

Large diffs are not rendered by default.

84 changes: 84 additions & 0 deletions charts/llamacloud/examples/basic-autoscaling-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
# This example shows how to configure autoscaling for the LlamaCloud platform.
# These settings are specified in the `values.yaml` file for each component.

# Autoscaling is enabled by default for llamaParse and llamaParseOcr given the nature of their workloads.
# To disable autoscaling for a component, set `autoscaling.enabled` to `false`.

global:
config:
licenseKey: "<REPLACE-WITH-LLAMACLOUD-LICENSE-KEY>"
# existingLicenseKeySecret: "<uncomment-if-using-existing-secret>"

backend:
autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 1
maxReplicas: 8
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

config:
openAiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"

oidc:
discoveryUrl: "https://login.microsoftonline.com/your-tenant-id/oauth2/v2.0/token"
clientId: "your-client-id"
clientSecret: "your-client-secret"
# existingSecretName: "oidc-secret"

jobsService:
autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 1
maxReplicas: 4
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

jobsWorker:
autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 1
maxReplicas: 4
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

llamaParse:
config:
openaiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"

autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 2
maxReplicas: 10
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

llamaParseOcr:
autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 2
maxReplicas: 10
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

usage:
autoscaling:
enabled: true
# below are the default values if not specified
# change them as needed :)
minReplicas: 1
maxReplicas: 4
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80
30 changes: 30 additions & 0 deletions charts/llamacloud/examples/basic-azure-openai.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
global:
config:
licenseKey: "<REPLACE-WITH-LLAMACLOUD-LICENSE-KEY>"
# existingLicenseKeySecret: "<uncomment-if-using-existing-secret>"

backend:
config:
azureOpenAi:
enabled: true
key: "<REPLACE-WITH-AZURE-OPENAI-API-KEY>"
endpoint: "<REPLACE-WITH-AZURE-OPENAI-ENDPOINT>"
deploymentName: "<REPLACE-WITH-AZURE-OPENAI-DEPLOYMENT-NAME>"
apiVersion: "<REPLACE-WITH-AZURE-OPENAI-API-VERSION>"
# existingSecret: "<uncomment-if-using-existing-secret>"

oidc:
discoveryUrl: "https://login.microsoftonline.com/your-tenant-id/oauth2/v2.0/token"
clientId: "your-client-id"
clientSecret: "your-client-secret"
# existingSecretName: "oidc-secret"

llamaParse:
config:
azureOpenAi:
enabled: true
key: "<REPLACE-WITH-AZURE-OPENAI-API-KEY>"
endpoint: "<REPLACE-WITH-AZURE-OPENAI-ENDPOINT>"
deploymentName: "<REPLACE-WITH-AZURE-OPENAI-DEPLOYMENT-NAME>"
apiVersion: "<REPLACE-WITH-AZURE-OPENAI-API-VERSION>"
# existingSecret: "<uncomment-if-using-existing-secret>"
20 changes: 20 additions & 0 deletions charts/llamacloud/examples/basic-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
global:
config:
licenseKey: "<REPLACE-WITH-LLAMACLOUD-LICENSE-KEY>"
# existingLicenseKeySecret: "<uncomment-if-using-existing-secret>"

backend:
config:
openAiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"

oidc:
discoveryUrl: "https://login.microsoftonline.com/your-tenant-id/oauth2/v2.0/token"
clientId: "your-client-id"
clientSecret: "your-client-secret"
# existingSecretName: "oidc-secret"

llamaParse:
config:
openaiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"
3 changes: 0 additions & 3 deletions charts/llamacloud/examples/basic.yaml

This file was deleted.

12 changes: 12 additions & 0 deletions charts/llamacloud/examples/custom-filestore-buckets-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
global:
config:
licenseKey: "<input-license-key-here>"
# existingLicenseKeySecret: ""

parsedDocumentsCloudBucketName: "<your-bucket-name>"
parsedEtlCloudBucketName: "<your-bucket-name>"
parsedExternalComponentsCloudBucketName: "<your-bucket-name>"
parsedFileParsingCloudBucketName: "<your-bucket-name>"
parsedRawFileCloudBucketName: "<your-bucket-name>"
parsedLlamaCloudParseOutputCloudBucketName: "<your-bucket-name>"
parsedFileScreenshotCloudBucketName: "<your-bucket-name>"
58 changes: 58 additions & 0 deletions charts/llamacloud/examples/external-deps-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# This example shows how to configure the LlamaCloud platform to use external dependencies.
# If an external dependency is enabled, the platform will not deploy the self-hosted dependency.
# Instead, it will use the values below to connect to the external dependency.
# You can find the env vars to set in the platform's Helm chart here:
# https://github.com/run-llama/helm-charts/blob/main/charts/llamacloud/templates/_helpers.tpl

global:
config:
licenseKey: "<input-license-key-here>"
# existingLicenseKeySecret: ""

postgresql:
external:
enabled: true
host: "my-postgresql-host"
port: "5432"
database: "my-database"
username: "my-user"
password: "my-password"
# existingSecretName: "my-existing-secret"

mongodb:
external:
enabled: true
host: "my-mongodb-host"
port: "27017"
username: "my-user"
password: "my-password"
# existingSecretName: "my-existing-secret"

rabbitmq:
external:
enabled: true
scheme: "amqp"
host: "my-rabbitmq-host"
port: "5672"
username: "my-user"
password: "my-password"
# existingSecretName: "my-existing-secret"

redis:
external:
enabled: true
host: "my-redis-host"
port: "6379"
# existingSecretName: "my-existing-secret"

postgresql:
enabled: false

mongodb:
enabled: false

rabbitmq:
enabled: false

redis:
enabled: false
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
# This example shows a full configuration for the LlamaCloud platform on Azure.
# It shows how to configure the platform to use Azure OpenAI and Azure Blob Storage (using s3proxy).
# We currently do not have native support for non-S3 filestores in LlamaCloud.

global:
cloudProvider: azure

Expand Down
110 changes: 110 additions & 0 deletions charts/llamacloud/examples/medium-size-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
# This is an example of a medium-size configuration for the LlamaCloud platform.
# It is designed to be a good starting point for a production environment.
# Our platform was designed to be horizontally scalable, so you can adjust the number of replicas for each component to fit your needs.
# For larger workloads (both large files and large number of files/jobs), you may need to increase the number of replicas and resources for each component.
# We also recommend using managed services for PostgreSQL, MongoDB, RabbitMQ, and Redis for production workloads, instead of self-hosted deployments.

global:
config:
licenseKey: "<REPLACE-WITH-LLAMACLOUD-LICENSE-KEY>"
# existingLicenseKeySecret: "<uncomment-if-using-existing-secret>"

backend:
autoscaling:
enabled: true
minReplicas: 4
maxReplicas: 8
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

resources:
requests:
cpu: 1
memory: 2Gi
limits:
cpu: 2
memory: 4Gi

config:
openAiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"

oidc:
discoveryUrl: "https://login.microsoftonline.com/your-tenant-id/oauth2/v2.0/token"
clientId: "your-client-id"
clientSecret: "your-client-secret"
# existingSecretName: "oidc-secret"

jobsService:
autoscaling:
enabled: true
minReplicas: 4
maxReplicas: 8
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

resources:
requests:
cpu: 1
memory: 500Mi
limits:
cpu: 2
memory: 2Gi

jobsWorker:
autoscaling:
enabled: true
minReplicas: 4
maxReplicas: 8
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

resources:
requests:
cpu: 1
memory: 2Gi
limits:
cpu: 2
memory: 4Gi

llamaParse:
config:
openaiApiKey: "<REPLACE-WITH-OPENAI-API-KEY>"
# existingOpenAiApiKeySecret: "<uncomment-if-using-existing-secret>"

autoscaling:
enabled: true
minReplicas: 5
maxReplicas: 20
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

llamaParseOcr:
autoscaling:
enabled: true
minReplicas: 5
maxReplicas: 20
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

usage:
autoscaling:
enabled: true
minReplicas: 2
maxReplicas: 8
targetCPUUtilizationPercentage: 80
targetMemoryUtilizationPercentage: 80

# (recommended) Disable all dependencies to use external services for production workloads

postgresql:
enabled: false

mongodb:
enabled: false

rabbitmq:
enabled: false

redis:
enabled: false
16 changes: 16 additions & 0 deletions charts/llamacloud/examples/private-registry-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# This is an example of how to configure the LlamaCloud platform to use a private image registry.
# Simply add a secret to the list of .Values.imagePullSecrets in the global section.

global:
config:
licenseKey: "<contact-support-for-key>"
# existingLicenseKeySecret: "<existing-secret-name>"

imagePullSecrets:
- name: "<your-pull-secret-name>"

backend:
# You can also attach your own IAM role annotations to the service account if needed.
# serviceAccount:
# annotations:
# eks.amazonaws.com/role-arn: "<your-role-arn>"
Loading

0 comments on commit 80a09c6

Please sign in to comment.