polardbx.aliyun.com/v1 PolarDBXCluster
By using PolarDBXCluster, you can freely define the topology, specifications, and configuration of a PolarDB-X Enterprise Edition cluster; it supports ultra-large-scale deployments and different levels of disaster tolerance.
The following are the configurable items and the meanings of the related fields:
apiVersion: polardbx.aliyun.com/v1
kind: PolarDBXCluster
metadata:
  name: full
spec:
  # **Optional**
  #
  # Whether to use DN-0 as a shared GMS to save resources, default value is false
  #
  # Not recommended for production clusters
  shareGMS: false

  # **Optional**
  #
  # The MySQL protocol version supported by the PolarDB-X cluster, default value is "8.0"
  # Available values: "5.7", "8.0"
  # Quoted so it is read as a string — an unquoted 8.0 would parse as a float
  protocolVersion: "8.0"

  # **Optional**
  #
  # The service name exposed within Kubernetes for the PolarDB-X cluster, defaults to .metadata.name
  serviceName: full

  # **Optional**
  #
  # The service type exposed within Kubernetes for the PolarDB-X cluster, defaults to ClusterIP
  # Available values can be referenced from Service types
  #
  # Note: LoadBalancer can be used in cloud-based Kubernetes clusters to bind with LB
  serviceType: LoadBalancer

  # **Optional**
  #
  # Whether the PolarDB-X cluster is a read-only instance, default is false
  readonly: false

  # **Optional**
  #
  # The name of the primary instance to which the PolarDB-X read-only instance belongs, default is empty
  # This field is invalid when this instance is not a read-only instance
  primaryCluster: pxc-master

  # **Optional**
  #
  # Read-only instances attached to the main PolarDB-X instance will take effect only when this instance is not read-only
  # When this instance is created, read-only instances with the same specifications and parameters will be created based on the following information
  # This field cannot be modified and is only effective during creation
  initReadonly:
    - # Number of read-only instance CNs
      cnReplicas: 1
      # **Optional**
      #
      # Read-only instance suffix name, if not filled in, a random suffix will be generated
      name: readonly
      # **Optional**
      #
      # Read-only instance parameters
      extraParams:
        AttendHtap: "true"

  # **Optional**
  #
  # PolarDB-X cluster security configuration
  security:
    # **Optional**
    #
    # TLS-related configuration, currently not effective
    tls:
      secretName: tls-secret
    # **Optional**
    #
    # Specifies the key used to encode internal passwords, referencing the specified Secret's key
    encodeKey:
      name: ek-secret
      key: key

  # **Optional**
  #
  # PolarDB-X initial account configuration
  privileges:
    - username: admin
      password: "123456"
      type: SUPER

  # PolarDB-X cluster configuration
  config:
    # CN-related configuration
    cn:
      # Static configuration, modifying will result in the CN cluster being rebuilt
      static:
        # Enable coroutines, OpenJDK does not support, dragonwell needs to be used
        EnableCoroutine: false
        # Enable consistent read in standby
        EnableReplicaRead: false
        # Enable remote debugging for JVM
        EnableJvmRemoteDebug: false
        # Custom CN static configuration, key-value structure
        ServerProperties:
          processors: 8
        # Whether to enable MPP capability on this (read-only) instance CN. The main instance CN is enabled by default
        # When this parameter is enabled, this instance will participate in multi-machine parallel processing (MPP)
        # and share the read traffic of the main instance, otherwise it will not participate
        AttendHtap: false
      # Dynamic configuration, modification and apply will be automatically pushed by the operator, key-value structure
      dynamic:
        CONN_POOL_IDLE_TIMEOUT: 30
    # DN-related configuration
    dn:
      # DN my.cnf configuration, overwriting part of the template
      mycnfOverwrite: |-
        loose_binlog_checksum: crc32
      # DN log purge interval
      logPurgeInterval: 5m
      # Separate storage of logs and data
      logDataSeparation: false

  # PolarDB-X cluster topology
  topology:
    # Image version (tag) used by the cluster, default is empty (specified by the operator)
    version: v1.0

    # Cluster deployment rules
    rules:
      # Predefined node selectors
      selectors:
        - name: zone-a
          nodeSelector:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - cn-hangzhou-a
        - name: zone-b
          nodeSelector:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - cn-hangzhou-b
        - name: zone-c
          nodeSelector:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - cn-hangzhou-c
      components:
        # **Optional**
        #
        # GMS deployment rules, defaults to DN
        gms:
          # Stacked deployment structure, the operator attempts to stack deploy in the nodes specified by the node selector
          # Each child node of the storage node to achieve higher resource utilization, for testing purposes only
          rolling:
            replicas: 3
            selector:
              reference: zone-a
          # Node set deployment structure, you can specify the node group and node selector of each DN child node,
          # to achieve high availability deployment structures such as cross-region and cross-city
          nodeSets:
            - name: cand-zone-a
              role: Candidate
              replicas: 1
              selector:
                reference: zone-a
            - name: cand-zone-b
              role: Candidate
              replicas: 1
              selector:
                reference: zone-b
            - name: log-zone-c
              role: Voter
              replicas: 1
              selector:
                reference: zone-c

        # **Optional**
        #
        # DN deployment rules, default is 3 nodes, all nodes can be deployed
        dn:
          nodeSets:
            - name: cands
              role: Candidate
              replicas: 2
            - name: log
              role: Voter
              replicas: 1

        # **Optional**
        #
        # CN deployment rules, also divided into CN nodes
        cn:
          - name: zone-a
            # Valid values: numbers, percentages, (0, 1] fractions, not filling in will mean the remaining replica (only one can be left blank)
            # The sum cannot exceed .topology.nodes.cn.replicas
            replicas: 1
            selector:
              reference: zone-a
          - name: zone-b
            # Quoted: fraction/percentage replica values are strings, not numbers
            replicas: "1 / 3"
            selector:
              reference: zone-b
          - name: zone-c
            replicas: "34%"
            selector:
              reference: zone-c

        # **Optional**
        #
        # CDC deployment rules, same as CN
        cdc:
          - name: half
            replicas: "50%"
            selector:
              reference: zone-a
          - name: half
            # With + means rounding up
            replicas: "50%+"
            selector:
              reference: zone-b

    nodes:
      # **Optional**
      #
      # GMS specification configuration, defaults to the same as DN
      gms:
        template:
          # Storage node engine, default galaxy
          engine: galaxy
          # Storage node image, default specified by the operator
          image: polardbx-engine-2.0:latest
          # Storage node service type, default is ClusterIP
          serviceType: ClusterIP
          # Whether the storage node pod uses the host network, default is true
          hostNetwork: true
          # Storage node disk space limit, no limit if not filled in (soft limit)
          diskQuota: 10Gi
          # Resources used by the storage node child node, default is 4c8g
          resources:
            limits:
              cpu: 4
              memory: 8Gi

      # **Optional**
      #
      # DN specifications configuration
      dn:
        # Number of DNs, default is 2
        replicas: 2
        template:
          resources:
            limits:
              cpu: 4
              memory: 8Gi
            # IO-related limitations, supporting BPS and IOPS limitations
            limits.io:
              iops: 1000
              bps: 10Mi

      # CN specification configuration, parameter interpretation same as DN
      cn:
        replicas: 3
        template:
          image: polardbx-sql:latest
          hostNetwork: false
          resources:
            limits:
              cpu: 4
              memory: 8Gi

      # CDC specification configuration, parameter interpretation same as CN, not configuring means not starting CDC capability
      cdc:
        replicas: 2
        template:
          image: polardbx-cdc:latest
          hostNetwork: false
          resources:
            limits:
              cpu: 4
              memory: 8Gi
        groups:
          - name: "group1"
            replicas: 1
            template:
              image: polardbx-cdc:latest
              hostNetwork: false
              resources:
                limits:
                  cpu: 4
                  memory: 8Gi
            # NOTE(review): group-level config with BINLOG_X envs — nesting assumed
            # sibling of template; verify against the operator's CDC group schema
            config:
              envs:
                cluster_type: "BINLOG_X"
                binlogx_stream_group_name: "group1"