Dev environment
This page describes how the entire Universe service orchestration stack can be deployed on a single machine, using multiple Kind clusters to separate the components of the infrastructure cluster from the components of the tenant cluster.
Increase open files limit
The default limit of 1024 is too low; you need to increase it.
Check the current limit for your user:
            
            ulimit -Hn
    
To change the limit for your user, edit /etc/security/limits.conf:
            
            <your user>             hard    nofile          10000
<your user>             soft    nofile          10000
    
Settings in /etc/security/limits.conf take effect only after a complete logout of your user.
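After logging back in, you can confirm the new limits are active; with the values above, both commands should print 10000:
            ulimit -Hn
            ulimit -Sn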
Increase inotify limits
            
            cat << 'EOF' | tee /etc/sysctl.d/10-kind-inotify.conf
fs.inotify.max_user_watches = 524288
fs.inotify.max_user_instances = 512
EOF
sysctl --system
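To confirm the new inotify limits are in effect, read the values back:
            sysctl fs.inotify.max_user_watches fs.inotify.max_user_instances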
    
Install kubectl
Follow the official kubectl installation guide.
Use the latest available version.
Install Helm3
Follow the official Helm installation guide.
Use the latest available version.
Install Kind
Follow the official Kind installation guide.
Use the latest available version.
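Once all three tools are installed, a quick sanity check confirms they are available on your PATH:
            kubectl version --client
            helm version
            kind version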
Create config folder
Create the .universe-dev folder, which will be used to store configuration:
            
            mkdir ~/.universe-dev
    
Enable shell completion for Kubectl and Helm
            
            mkdir -p ~/.universe-dev/completion
kubectl completion bash > ~/.universe-dev/completion/kubectl.bash.inc
helm completion bash > ~/.universe-dev/completion/helm.bash.inc
printf '
source $HOME/.universe-dev/completion/kubectl.bash.inc
source $HOME/.universe-dev/completion/helm.bash.inc
if [ -f "$HOME/.universe-dev/completion/clusters" ] ; then
source $HOME/.universe-dev/completion/clusters
fi
' > ~/.universe-dev/completion/include
source ~/.universe-dev/completion/include
    
You can also add source $HOME/.universe-dev/completion/include to your bash_profile or bashrc file
so these completions are loaded on shell start:
            
            echo 'source $HOME/.universe-dev/completion/include' >> ~/.bashrc
    
Create docker networks
This guide assumes that you are using Docker as the provider in Kind. The Podman provider doesn't support custom networks at the moment.
For the icp cluster we will use the 10.133.133.0/24 network; your local host will have the IP 10.133.133.1.
For the tcp1 cluster we will use the 10.133.134.0/24 network; your local host will have the IP 10.133.134.1.
            
            # for iCP cluster
docker network create --subnet 10.133.133.0/24 kind-icp
# for first tenant cluster
docker network create --subnet 10.133.134.0/24 kind-tcp1
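Optionally, double-check that both networks were created with the expected subnets (the format expression below assumes Docker's default IPAM output fields):
            docker network inspect kind-icp --format '{{(index .IPAM.Config 0).Subnet}}'
            docker network inspect kind-tcp1 --format '{{(index .IPAM.Config 0).Subnet}}'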
    
Create Kind config
            
            cat << 'EOF' | tee ~/.universe-dev/kind-icp.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: icp
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 30001
    hostPort: 30001
  - containerPort: 30002
    hostPort: 30002
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      name: icp-master
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      name: dpu1-host-a
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      name: dpu1-host-b
EOF
    
Deploy icp Kind cluster
            
            KIND_EXPERIMENTAL_DOCKER_NETWORK=kind-icp kind create cluster --config ~/.universe-dev/kind-icp.yaml
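Cluster creation takes a few minutes. Once it completes, the new cluster should show up in the Kind cluster list:
            kind get clusters   # the output should include: icp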
    
Copy icp kubeconfig
            
            kind get kubeconfig --name icp > ~/.universe-dev/kubeconfig-icp
chmod 700 ~/.universe-dev/kubeconfig-icp
    
Configure icp alias
Set the k-icp alias to access the infrastructure cluster with kubectl.
Set the h-icp alias to access the infrastructure cluster with helm.
            
            printf '
_k-icp() {
env KUBECONFIG=~/.universe-dev/kubeconfig-icp kubectl "$@"
}
_h-icp() {
env KUBECONFIG=~/.universe-dev/kubeconfig-icp helm "$@"
}
alias k-icp="_k-icp"
complete -F __start_kubectl k-icp
alias h-icp="_h-icp"
complete -F __start_helm h-icp
' >> ~/.universe-dev/completion/clusters
source ~/.universe-dev/completion/include
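With the alias in place, you should see the three icp nodes named in the Kind config above (they may take a minute to become Ready):
            k-icp get nodes   # icp-master, dpu1-host-a, dpu1-host-b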
    
Create tcp1 Kind config
            
            cat << 'EOF' | tee ~/.universe-dev/kind-tcp1.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: tcp1
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      name: tenant1-master
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      name: host-a
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      name: host-b
EOF
    
Deploy tcp1 Kind cluster
            
            KIND_EXPERIMENTAL_DOCKER_NETWORK=kind-tcp1 kind create cluster --config ~/.universe-dev/kind-tcp1.yaml
    
Copy tcp1 kubeconfig
            
            kind get kubeconfig --name tcp1 > ~/.universe-dev/kubeconfig-tcp1
chmod 700 ~/.universe-dev/kubeconfig-tcp1
    
Configure tcp1 alias
Set the k-tcp1 alias to access the tcp1 cluster with kubectl.
Set the h-tcp1 alias to access the tcp1 cluster with helm.
            
            printf '
_k-tcp1() {
env KUBECONFIG=~/.universe-dev/kubeconfig-tcp1 kubectl "$@"
}
_h-tcp1() {
env KUBECONFIG=~/.universe-dev/kubeconfig-tcp1 helm "$@"
}
alias k-tcp1="_k-tcp1"
complete -F __start_kubectl k-tcp1
alias h-tcp1="_h-tcp1"
complete -F __start_helm h-tcp1
' >> ~/.universe-dev/completion/clusters
source ~/.universe-dev/completion/include
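As with the infrastructure cluster, verify that the tenant nodes are reachable through the new alias:
            k-tcp1 get nodes   # tenant1-master, host-a, host-b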
    
Clone Universe Helm charts
            
            git clone -b main https://gitlab-master.nvidia.com/cloud-orchestration/universe-helm-charts
    
Deploy iCP components
            
            cd universe-helm-charts/universe-infra-control-plane
cat << 'EOF' | tee env-config.yaml
universe-infra-admin-controller:
  tenantConfig:
    create: true
    tenants:
    - id: tenant1
      hostnames:
      - host-a
      - host-b
  dpuInventory:
    create: true
    dpus:
    - id: dpu1-host-a
      host: host-a
    - id: dpu1-host-b
      host: host-b
EOF
h-icp install -n universe --create-namespace \
    -f example-values-dev.yaml \
    -f env-config.yaml \
    icp .
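Before moving on, you can watch the infrastructure components come up; the exact pod names depend on the chart contents:
            h-icp list -n universe
            k-icp get pods -n universe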
    
Deploy tCP components
            
            cd ../universe-tenant-control-plane
cat << 'EOF' | tee env-config.yaml
global:
  sidecars:
    proxy:
      config:
        listener:
          inject_headers:
            tenant-id: tenant1
        upstream:
          address: 10.133.133.1 # Infra API gateway address
EOF
h-tcp1 install -n universe --create-namespace \
    -f example-values-dev.yaml \
    -f env-config.yaml \
    tcp .
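The same check applies on the tenant side:
            k-tcp1 get pods -n universe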
    
            
Verify tenant resource sync
Create a test UVSPod in the tenant cluster and check that it syncs successfully:
            
            cat << 'EOF' | tee tenant-pod1.yaml
apiVersion: resource.universe.nvidia.com/v1alpha1
kind: UVSPod
metadata:
  name: tenant-pod1
  namespace: universe
spec:
  object:
    apiVersion: v1
    kind: Pod
    metadata:
      name: tenant-pod1
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
EOF
k-tcp1 apply -f tenant-pod1.yaml
k-tcp1 get uvspods.resource.universe.nvidia.com -n universe tenant-pod1 -o jsonpath='{.status.syncResult}{"\n"}'
# expected output:
{"result":"success"}
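When you are done experimenting, the test resource can be removed the same way it was created:
            k-tcp1 delete -f tenant-pod1.yaml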
    
Replace a Universe component
This example shows how to replace a Universe component in your dev clusters.
We will replace the universe-infra-admin-controller component.
            
            cd universe-infra-services/infra-admin-controller
make build
make IMG=local/universe-infra-admin-controller:dev docker-build
make IMG=local/universe-infra-admin-controller:dev KIND_CLUSTER=icp kind-load-image
# change directory to the charts dir
cd ~/universe-helm-charts/universe-infra-control-plane
h-icp upgrade -n universe --reuse-values=true \
    --set universe-infra-admin-controller.operator.image.registry=local/ \
    --set universe-infra-admin-controller.operator.image.tag="dev" icp .
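To confirm the release now uses the locally built image, you can list the images of the pods in the universe namespace; the exact pod name of the admin controller depends on the chart:
            k-icp get pods -n universe -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'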