# required
# count: 1
apiVersion: performance.openshift.io/v2
kind: PerformanceProfile
metadata:
name: $name
annotations:
    # Some pods want the kernel stack to ignore IPv6 Router Advertisements.
kubeletconfig.experimental: |
{"allowedUnsafeSysctls":["net.ipv6.conf.all.accept_ra"]}
spec:
cpu:
# node0 CPUs: 0-17,36-53
    # node1 CPUs: 18-35,54-71
# siblings: (0,36), (1,37)...
    # we want to reserve the first core of each NUMA socket
#
# no CPU left behind! all-cpus == isolated + reserved
    isolated: $isolated # e.g. 1-17,19-35,37-53,55-71
    reserved: $reserved # e.g. 0,18,36,54
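    # The sibling/NUMA layout above can be verified on the node, e.g.:
    #   lscpu -e=CPU,NODE,CORE
    #   cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list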
  # Guaranteed QoS pods can disable IRQ balancing for the cores allocated to
  # the pod (via a CRI-O annotation; see the sketch below).
  # The default value of globallyDisableIrqLoadBalancing is false.
globallyDisableIrqLoadBalancing: false
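  # Sketch of the per-pod opt-out this enables (hypothetical pod metadata,
  # not part of this profile): a Guaranteed QoS pod adds
  #   metadata:
  #     annotations:
  #       irq-load-balancing.crio.io: "disable"
  # and IRQ load balancing is then disabled on that pod's CPUs.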
hugepages:
defaultHugepagesSize: 1G
pages:
    # e.g. 64 x 1G pages = 64GB, split evenly across the two NUMA nodes
    # (32GB per node)
    - count: $count # e.g. 64
size: 1G
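  # Consumed by pods as hugepages-1Gi resources (hypothetical container
  # snippet; cpu or memory must be requested alongside hugepages):
  #   resources:
  #     limits:
  #       hugepages-1Gi: 2Gi
  #       memory: 1Gi
  #       cpu: "2"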
machineConfigPoolSelector:
# For SNO: machineconfiguration.openshift.io/role: 'master'
pools.operator.machineconfiguration.openshift.io/worker: ''
nodeSelector:
# For SNO: node-role.kubernetes.io/master: ""
node-role.kubernetes.io/worker: ""
workloadHints:
realTime: false
highPowerConsumption: false
perPodPowerManagement: true
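  # With perPodPowerManagement, a latency-critical pod can opt back out of
  # power savings via CRI-O annotations (sketch, assuming the documented
  # annotations; hypothetical pod metadata):
  #   metadata:
  #     annotations:
  #       cpu-c-states.crio.io: "disable"
  #       cpu-freq-governor.crio.io: "performance"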
realTimeKernel:
enabled: false
numa:
# All guaranteed QoS containers get resources from a single NUMA node
topologyPolicy: "single-numa-node"
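  # i.e. a pod is only admitted if its whole Guaranteed QoS request fits on
  # one NUMA node, e.g. (hypothetical):
  #   resources:
  #     requests: {cpu: "4", memory: "8Gi"}
  #     limits: {cpu: "4", memory: "8Gi"}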
net:
    # Limit the number of IRQs to 8 (the number of reserved CPUs)
    # => this is good enough: I now see 262 IRQs instead of 501
    # but this is a waste... TODO: eliminate the waste (see below)!
#
# This is more than enough for eno1 (host and OVN)
    # eno2 should be down. TODO: add an MCO or use NMState in OCP 4.10
# SR-IOV PFs: only VFs are used. TODO: MCO to have a single IRQ
userLevelNetworking: false
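  # To actually cap device queues/IRQs at the reserved-CPU count, the variant
  # would be (sketch; the interface name is an example):
  #   net:
  #     userLevelNetworking: true
  #     devices:
  #     - interfaceName: "eno1"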