List Active Jobs

Prerequisites

Before you can list active customization jobs, make sure that you have:

  • Obtained the base URL of your NeMo Customizer service.

  • Set the CUSTOMIZER_BASE_URL environment variable to your NeMo Customizer service endpoint:

export CUSTOMIZER_BASE_URL="https://your-customizer-service-url"
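
A quick way to confirm the variable is visible before running the examples below (plain Python, nothing Customizer-specific):

import os

base_url = os.environ.get("CUSTOMIZER_BASE_URL")
if not base_url:
    raise SystemExit("CUSTOMIZER_BASE_URL is not set; export it first.")
print(f"Using Customizer endpoint: {base_url}")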

To List Active Customization Jobs

Choose one of the following options to list active customization jobs.

Python

import os
from nemo_microservices import NeMoMicroservices

# Initialize the client
client = NeMoMicroservices(
    base_url=os.environ['CUSTOMIZER_BASE_URL']
)

# List active customization jobs with filters
jobs = client.customization.jobs.list(
    page=1,
    page_size=10,
    filter={
        "finetuning_type": "lora",
        "training_type": "sft",
        "batch_size": 8,
        "epochs": 3,
        "log_every_n_steps": 1,
        "dataset": "string",
        "status": "created"
    },
    sort="created_at"
)

print(f"Found {len(jobs.data)} jobs")
for job in jobs.data:
    print(f"Job {job.id}: {job.status}")
BASE_MODEL="meta/llama-3.1-8b-instruct"
FINETUNING_TYPE="lora"
TRAINING_TYPE="sft"
BATCH_SIZE="8"
EPOCHS="3"
LOG_EVERY_N_STEPS="1"
DATASET="string"
STATUS="created"

curl --get \
  "${CUSTOMIZER_BASE_URL}/customization/jobs" \
  --data-urlencode "page=1" \
  --data-urlencode "page_size=10" \
  --data-urlencode "sort=created_at" \
  --data-urlencode "filter[base_model]=${BASE_MODEL}" \
  --data-urlencode "filter[finetuning_type]=${FINETUNING_TYPE}" \
  --data-urlencode "filter[training_type]=${TRAINING_TYPE}" \
  --data-urlencode "filter[batch_size]=${BATCH_SIZE}" \
  --data-urlencode "filter[epochs]=${EPOCHS}" \
  --data-urlencode "filter[log_every_n_steps]=${LOG_EVERY_N_STEPS}" \
  --data-urlencode "filter[dataset]=${DATASET}" \
  --data-urlencode "filter[status]=${STATUS}" \
  -H 'accept: application/json' \
  | jq
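
The filter[...] keys are ordinary query parameters, so the same request works from any HTTP client. A sketch using Python's requests package (an assumption here, not a Customizer dependency); the parameter names mirror the curl call above:

import os

import requests

params = {
    "page": 1,
    "page_size": 10,
    "sort": "created_at",
    # Bracketed keys are sent verbatim as query parameters.
    "filter[finetuning_type]": "lora",
    "filter[training_type]": "sft",
    "filter[status]": "created",
}
resp = requests.get(
    f"{os.environ['CUSTOMIZER_BASE_URL']}/customization/jobs",
    params=params,
    headers={"accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["pagination"])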
Example Response
{
  "object": "list",
  "data": [
    {
      "id": "cust-JGTaMbJMdqjJU8WbQdN9Q2",
      "created_at": "2025-02-10T16:45:51.775Z",
      "updated_at": "2025-02-10T16:45:51.775Z",
      "namespace": "default",
      "description": "Fine-tuning llama model for code generation",
      "config": "meta/llama-3.1-8b-instruct",
      "dataset": "default/code-generation-dataset",
      "hyperparameters": {
        "finetuning_type": "lora",
        "training_type": "sft",
        "batch_size": 8,
        "epochs": 50,
        "learning_rate": 0.0001,
        "lora": {
          "adapter_dim": 8,
          "adapter_dropout": 1
        }
      },
      "output_model": "code-gen-llama-v1",
      "status": "running",
      "project": "code-generation-project",
      "custom_fields": {
        "team": "nlp-research",
        "priority": "high",
        "use_case": "code-generation"
      },
      "ownership": {
        "created_by": "john.doe@example.com",
        "access_policies": {
          "team": "nlp-research",
          "read": ["nlp-research", "ml-ops"],
          "write": ["nlp-research"]
        }
      }
    }
  ],
  "pagination": {
    "page": 1,
    "page_size": 10,
    "current_page_size": 1,
    "total_pages": 1,
    "total_results": 1
  },
  "sort": "created_at:desc",
  "filter": {
    "base_model": "meta/llama-3_1-8b-instruct",
    "finetuning_type": "lora",
    "training_type": "sft",
    "batch_size": 8,
    "epochs": 50,
    "log_every_n_steps": 100,
    "dataset": "default/code-generation-dataset",
    "status": "running"
  }
}
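
Once the body is parsed into a Python dict (called response below, standing in for the output of either option above), the documented fields make quick summaries easy, for example counting jobs per status:

from collections import Counter

# `response` is the parsed JSON body shown in the example above.
counts = Counter(job["status"] for job in response["data"])
for status, count in counts.items():
    print(f"{status}: {count} job(s)")

pagination = response["pagination"]
print(f"Page {pagination['page']} of {pagination['total_pages']} "
      f"({pagination['total_results']} total results)")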