List Active Jobs#
Prerequisites#
Before you can list active customization jobs, make sure that you have:
- Obtained the base URL of your NeMo Customizer service.
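The request examples in this section refer to the service through the `CUSTOMIZER_HOSTNAME` environment variable. A minimal setup sketch, where the hostname is a placeholder for your own deployment:

```bash
# Placeholder hostname; replace it with the base URL of your NeMo Customizer service.
export CUSTOMIZER_HOSTNAME="nemo.example.com"
```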
List Active Customization Jobs#
API#
Perform a GET request to the `/v1/customization/jobs` endpoint.

```bash
BASE_MODEL="meta/llama-3.1-8b-instruct"
FINETUNING_TYPE="lora"
TRAINING_TYPE="sft"
BATCH_SIZE="8"
EPOCHS="3"
LOG_EVERY_N_STEPS="1"
DATASET="string"
STATUS="created"

curl --get \
  "https://${CUSTOMIZER_HOSTNAME}/v1/customization/jobs" \
  --data-urlencode "page=1" \
  --data-urlencode "filter[base_model]=${BASE_MODEL}" \
  --data-urlencode "filter[finetuning_type]=${FINETUNING_TYPE}" \
  --data-urlencode "filter[training_type]=${TRAINING_TYPE}" \
  --data-urlencode "filter[batch_size]=${BATCH_SIZE}" \
  --data-urlencode "filter[epochs]=${EPOCHS}" \
  --data-urlencode "filter[log_every_n_steps]=${LOG_EVERY_N_STEPS}" \
  --data-urlencode "filter[dataset]=${DATASET}" \
  --data-urlencode "filter[status]=${STATUS}" \
  -H 'accept: application/json' \
  -H 'Content-Type: application/json' \
  | jq
```
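The request above returns only the first page of results. If your deployment has more jobs than fit on one page, you can walk the remaining pages with the same `page` query parameter. A minimal sketch, assuming `CUSTOMIZER_HOSTNAME` is set and `jq` is installed:

```bash
# Read total_pages from the first page of results, then request each page in turn.
TOTAL_PAGES=$(curl --get --silent \
  "https://${CUSTOMIZER_HOSTNAME}/v1/customization/jobs" \
  --data-urlencode "page=1" \
  -H 'accept: application/json' \
  | jq '.pagination.total_pages')

for PAGE in $(seq 1 "${TOTAL_PAGES}"); do
  curl --get --silent \
    "https://${CUSTOMIZER_HOSTNAME}/v1/customization/jobs" \
    --data-urlencode "page=${PAGE}" \
    -H 'accept: application/json' \
    | jq '.data[].id'
done
```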
Review the returned customization jobs.
Example Response
{ "object": "list", "data": [ { "id": "cust-JGTaMbJMdqjJU8WbQdN9Q2", "created_at": "2025-02-10T16:45:51.775Z", "updated_at": "2025-02-10T16:45:51.775Z", "namespace": "default", "description": "Fine-tuning llama model for code generation", "config": "meta/llama-3.1-8b-instruct", "dataset": "default/code-generation-dataset", "hyperparameters": { "finetuning_type": "lora", "training_type": "sft", "batch_size": 8, "epochs": 50, "learning_rate": 0.0001, "lora": { "adapter_dim": 8, "adapter_dropout": 1 } }, "output_model": "code-gen-llama-v1", "status": "running", "project": "code-generation-project", "custom_fields": { "team": "nlp-research", "priority": "high", "use_case": "code-generation" }, "ownership": { "created_by": "john.doe@example.com", "access_policies": { "team": "nlp-research", "read": ["nlp-research", "ml-ops"], "write": ["nlp-research"] } } } ], "pagination": { "page": 1, "page_size": 10, "current_page_size": 1, "total_pages": 1, "total_results": 1 }, "sort": "created_at:desc", "filter": { "base_model": "meta/llama-3_1-8b-instruct", "finetuning_type": "lora", "training_type": "sft", "batch_size": 8, "epochs": 50, "log_every_n_steps": 100, "dataset": "default/code-generation-dataset", "status": "running" } }