From c19e80443859b7b3bce2c14b2d9af9d20241bea7 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 23 Jan 2024 14:46:01 +0300 Subject: [PATCH 01/24] impl remoteNodeSet api objects --- Makefile | 2 + api/v1alpha1/remotedatabasenodeset_types.go | 35 + api/v1alpha1/remotestoragenodeset_types.go | 35 + api/v1alpha1/zz_generated.deepcopy.go | 118 + .../crds/remotedatabasenodeset.yaml | 4653 +++++++++++++++++ .../crds/remotestoragenodeset.yaml | 4646 ++++++++++++++++ 6 files changed, 9489 insertions(+) create mode 100644 api/v1alpha1/remotedatabasenodeset_types.go create mode 100644 api/v1alpha1/remotestoragenodeset_types.go create mode 100644 deploy/ydb-operator/crds/remotedatabasenodeset.yaml create mode 100644 deploy/ydb-operator/crds/remotestoragenodeset.yaml diff --git a/Makefile b/Makefile index 8cc9915f..e21076d9 100644 --- a/Makefile +++ b/Makefile @@ -51,6 +51,8 @@ manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and Cust cp config/crd/bases/ydb.tech_databases.yaml deploy/ydb-operator/crds/database.yaml cp config/crd/bases/ydb.tech_storagenodesets.yaml deploy/ydb-operator/crds/storagenodeset.yaml cp config/crd/bases/ydb.tech_databasenodesets.yaml deploy/ydb-operator/crds/databasenodeset.yaml + cp config/crd/bases/ydb.tech_remotestoragenodesets.yaml deploy/ydb-operator/crds/remotestoragenodeset.yaml + cp config/crd/bases/ydb.tech_remotedatabasenodesets.yaml deploy/ydb-operator/crds/remotedatabasenodeset.yaml cp config/crd/bases/ydb.tech_databasemonitorings.yaml deploy/ydb-operator/crds/databasemonitoring.yaml cp config/crd/bases/ydb.tech_storagemonitorings.yaml deploy/ydb-operator/crds/storagemonitoring.yaml diff --git a/api/v1alpha1/remotedatabasenodeset_types.go b/api/v1alpha1/remotedatabasenodeset_types.go new file mode 100644 index 00000000..1dedba84 --- /dev/null +++ b/api/v1alpha1/remotedatabasenodeset_types.go @@ -0,0 +1,35 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + 
+//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The status of this RemoteDatabaseNodeSet" +//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// RemoteDatabaseNodeSet declares NodeSet spec and status for objects in remote cluster +type RemoteDatabaseNodeSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Spec DatabaseNodeSetSpec `json:"spec,omitempty"` + // +optional + // +kubebuilder:default:={state: "Pending"} + Status DatabaseNodeSetStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// RemoteDatabaseNodeSetList contains a list of RemoteDatabaseNodeSet +type RemoteDatabaseNodeSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RemoteDatabaseNodeSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RemoteDatabaseNodeSet{}, &RemoteDatabaseNodeSetList{}) +} diff --git a/api/v1alpha1/remotestoragenodeset_types.go b/api/v1alpha1/remotestoragenodeset_types.go new file mode 100644 index 00000000..dde72221 --- /dev/null +++ b/api/v1alpha1/remotestoragenodeset_types.go @@ -0,0 +1,35 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state",description="The status of this RemoteStorageNodeSet" +//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" + +// RemoteStorageNodeSet declares NodeSet spec and status for objects in remote cluster +type RemoteStorageNodeSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Spec StorageNodeSetSpec `json:"spec,omitempty"` + // +optional + // 
+kubebuilder:default:={state: "Pending"} + Status StorageNodeSetStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// RemoteStorageNodeSetList contains a list of RemoteStorageNodeSet +type RemoteStorageNodeSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RemoteStorageNodeSet `json:"items"` +} + +func init() { + SchemeBuilder.Register(&RemoteStorageNodeSet{}, &RemoteStorageNodeSetList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index b5652ab8..ccdaf943 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -755,6 +755,124 @@ func (in *PodImage) DeepCopy() *PodImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteDatabaseNodeSet) DeepCopyInto(out *RemoteDatabaseNodeSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteDatabaseNodeSet. +func (in *RemoteDatabaseNodeSet) DeepCopy() *RemoteDatabaseNodeSet { + if in == nil { + return nil + } + out := new(RemoteDatabaseNodeSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemoteDatabaseNodeSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteDatabaseNodeSetList) DeepCopyInto(out *RemoteDatabaseNodeSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemoteDatabaseNodeSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteDatabaseNodeSetList. +func (in *RemoteDatabaseNodeSetList) DeepCopy() *RemoteDatabaseNodeSetList { + if in == nil { + return nil + } + out := new(RemoteDatabaseNodeSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemoteDatabaseNodeSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteStorageNodeSet) DeepCopyInto(out *RemoteStorageNodeSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteStorageNodeSet. +func (in *RemoteStorageNodeSet) DeepCopy() *RemoteStorageNodeSet { + if in == nil { + return nil + } + out := new(RemoteStorageNodeSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemoteStorageNodeSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RemoteStorageNodeSetList) DeepCopyInto(out *RemoteStorageNodeSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RemoteStorageNodeSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteStorageNodeSetList. +func (in *RemoteStorageNodeSetList) DeepCopy() *RemoteStorageNodeSetList { + if in == nil { + return nil + } + out := new(RemoteStorageNodeSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RemoteStorageNodeSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServerlessDatabaseResources) DeepCopyInto(out *ServerlessDatabaseResources) { *out = *in diff --git a/deploy/ydb-operator/crds/remotedatabasenodeset.yaml b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml new file mode 100644 index 00000000..23b70cce --- /dev/null +++ b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml @@ -0,0 +1,4653 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: remotedatabasenodesets.ydb.tech +spec: + group: ydb.tech + names: + kind: RemoteDatabaseNodeSet + listKind: RemoteDatabaseNodeSetList + plural: remotedatabasenodesets + singular: remotedatabasenodeset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The status of this RemoteDatabaseNodeSet + jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RemoteDatabaseNodeSet declares NodeSet spec and status for objects + in remote cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DatabaseNodeSetSpec describes an group nodes of Database + object + properties: + additionalAnnotations: + additionalProperties: + type: string + description: (Optional) Additional custom resource annotations that + are added to all resources + type: object + additionalLabels: + additionalProperties: + type: string + description: (Optional) Additional custom resource labels that are + added to all resources + type: object + affinity: + description: (Optional) If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. 
+ items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". 
An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. 
Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. 
null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + caBundle: + description: User-defined root certificate authority that is added + to system trust store of Storage pods on startup. + type: string + configuration: + description: YDB configuration in YAML format. Will be applied on + top of generated one in internal/configuration + type: string + databaseRef: + description: YDB Database namespaced reference + properties: + name: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + namespace: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - name + type: object + datastreams: + description: Datastreams config + properties: + enabled: + type: boolean + iam_service_account_key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + domain: + default: Root + description: '(Optional) Name of the root storage domain Default: + Root' + maxLength: 63 + pattern: '[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?' 
+ type: string + encryption: + description: Encryption configuration + properties: + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + pin: + type: string + required: + - enabled + type: object + image: + description: (Optional) YDB Image + properties: + name: + description: 'Container image with supported YDB version. This + defaults to the version pinned to the operator and requires + a full container and tag/sha name. For example: cr.yandex/crptqonuodf51kdj7a7d/ydb:22.2.22' + type: string + pullPolicy: + description: '(Optional) PullPolicy for the image, which defaults + to IfNotPresent. Default: IfNotPresent' + type: string + pullSecret: + description: (Optional) Secret name containing the dockerconfig + to use for a registry that requires authentication. The secret + must be configured first by the user. + type: string + type: object + initContainers: + description: '(Optional) List of initialization containers belonging + to the pod. Init containers are executed in order prior to containers + being started. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. 
If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. 
"$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). 
This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. 
+ Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. 
You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. 
If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. 
If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + monitoring: + description: '(Optional) Monitoring sets configuration options for + YDB observability Default: ""' + properties: + enabled: + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: RelabelConfig allows dynamic rewriting of the label + set, being applied to sample before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. 
+ type: string + type: object + type: array + required: + - enabled + type: object + nodeSelector: + additionalProperties: + type: string + description: '(Optional) NodeSelector is a selector which must be + true for the pod to fit on a node. Selector which must match a node''s + labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + nodes: + description: Number of nodes (pods) in the cluster + format: int32 + type: integer + operatorSync: + default: true + description: Enables or disables operator's reconcile loop. `false` + means all the Pods are running, but the reconcile is effectively + turned off. `true` means the default state of the system, all Pods + running, operator reacts to specification change of this Database + resource. + type: boolean + path: + description: '(Optional) Custom database path in schemeshard Default: + //' + maxLength: 255 + pattern: /[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?/[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?(/[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?)* + type: string + pause: + default: false + description: The state of the Database processes. `true` means all + the Database Pods are being killed, but the Database resource is + persisted. `false` means the default state of the system, all Pods + running. + type: boolean + priorityClassName: + description: (Optional) If specified, the pod's priorityClassName. + type: string + resources: + description: (Optional) Database storage and compute resources + properties: + containerResources: + description: '(Optional) Database container resource limits. Any + container limits can be specified. Default: (not specified)' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageUnits: + description: 'Kind of the storage unit. Determine guarantees for + all main unit parameters: used hard disk type, capacity throughput, + IOPS etc.' + items: + properties: + count: + description: Number of units in this set. + format: int64 + type: integer + unitKind: + description: 'Kind of the storage unit. Determine guarantees + for all main unit parameters: used hard disk type, capacity + throughput, IOPS etc.' 
+ type: string + required: + - count + - unitKind + type: object + type: array + type: object + secrets: + description: 'Secret names that will be mounted into the well-known + directory of every storage pod. Directory: `/opt/ydb/secrets//`' + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + serverlessResources: + description: (Optional) If specified, created database will be "serverless". + properties: + sharedDatabaseRef: + description: Reference to YDB Database with configured shared + resources + properties: + name: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + namespace: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - name + type: object + required: + - sharedDatabaseRef + type: object + service: + description: '(Optional) Storage services parameter overrides Default: + (not specified)' + properties: + datastreams: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + tls: + properties: + CA: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + certificate: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + type: object + grpc: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + externalHost: + type: string + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). 
+ type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + tls: + properties: + CA: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + certificate: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + type: object + interconnect: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + tls: + properties: + CA: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + certificate: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + type: object + status: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + type: object + type: object + sharedResources: + description: (Optional) Shared resources can be used by serverless + databases. + properties: + containerResources: + description: '(Optional) Database container resource limits. Any + container limits can be specified. Default: (not specified)' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + storageUnits: + description: 'Kind of the storage unit. Determine guarantees for + all main unit parameters: used hard disk type, capacity throughput, + IOPS etc.' + items: + properties: + count: + description: Number of units in this set. + format: int64 + type: integer + unitKind: + description: 'Kind of the storage unit. Determine guarantees + for all main unit parameters: used hard disk type, capacity + throughput, IOPS etc.' + type: string + required: + - count + - unitKind + type: object + type: array + type: object + storageClusterRef: + description: YDB Storage cluster reference + properties: + name: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + namespace: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' 
+ type: string + required: + - name + type: object + storageEndpoint: + description: YDB Storage Node broker address + type: string + tolerations: + description: (Optional) If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: (Optional) If specified, the pod's topologySpreadConstraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. 
+ properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select + the pods over which spreading will be calculated. The keys + are used to lookup values from the incoming pod labels, those + key-value labels are ANDed with labelSelector to select the + group of existing pods over which spreading will be calculated + for the incoming pod. Keys that don't exist in the incoming + pod labels will be ignored. A null or empty list means only + match against labelSelector. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods + in an eligible domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. | + zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 to become + 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation of + Skew is performed. And when the number of eligible domains + with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. If + value is nil, the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. When value + is not nil, WhenUnsatisfiable must be DoNotSchedule. 
\n For + example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains + is set to 5 and pods with the same labelSelector spread as + 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), so \"global + minimum\" is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any of the + three zones, it will violate MaxSkew. \n This is a beta field + and requires the MinDomainsInPodTopologySpread feature gate + to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. \n + If this value is nil, the behavior is equivalent to the Honor + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node + taints when calculating pod topology spread skew. Options + are: - Honor: nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + \n If this value is nil, the behavior is equivalent to the + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each + as a "bucket", and try to put balanced number of pods into + each bucket. 
We define a domain as a particular instance of + a topology. Also, we define an eligible domain as a domain + whose nodes meet the requirements of nodeAffinityPolicy and + nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain of + that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + version: + description: '(Optional) YDBVersion sets the explicit version of the + YDB image Default: ""' + type: string + volumes: + description: 'Additional volumes that will be mounted into the well-known + directory of every storage pod. Directory: `/opt/ydb/volumes/`. + Only `hostPath` volume type is supported for now.' 
+ items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). 
+ ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. 
If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. 
YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. 
The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. 
Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' 
+ type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. 
+ type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. 
May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. 
+ type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - databaseRef + - nodes + - storageClusterRef + type: object + status: + default: + state: Pending + description: DatabaseNodeSetStatus defines the observed state + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedDatabaseGeneration: + format: int64 + type: integer + state: + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/ydb-operator/crds/remotestoragenodeset.yaml b/deploy/ydb-operator/crds/remotestoragenodeset.yaml new file mode 100644 index 00000000..75eda050 --- /dev/null +++ b/deploy/ydb-operator/crds/remotestoragenodeset.yaml @@ -0,0 +1,4646 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.1 + creationTimestamp: null + name: 
remotestoragenodesets.ydb.tech +spec: + group: ydb.tech + names: + kind: RemoteStorageNodeSet + listKind: RemoteStorageNodeSetList + plural: remotestoragenodesets + singular: remotestoragenodeset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The status of this RemoteStorageNodeSet + jsonPath: .status.state + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: RemoteStorageNodeSet declares NodeSet spec and status for objects + in remote cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StorageNodeSetSpec describes an group nodes of Storage object + properties: + additionalAnnotations: + additionalProperties: + type: string + description: (Optional) Additional custom resource annotations that + are added to all resources + type: object + additionalLabels: + additionalProperties: + type: string + description: (Optional) Additional custom resource labels that are + added to all resources + type: object + affinity: + description: (Optional) If specified, the pod's scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. 
The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. 
+ type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. 
When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + caBundle: + description: User-defined root certificate authority that is added + to system trust store of Storage pods on startup. + type: string + configuration: + description: YDB configuration in YAML format. Will be applied on + top of generated one in internal/configuration + type: string + dataStore: + description: (Optional) Where cluster data should be kept + items: + description: PersistentVolumeClaimSpec describes the common attributes + of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'accessModes contains the desired access modes + the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature gate is + enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when + dataSourceRef.namespace is not specified. If the namespace + is specified, then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object from which + to populate the volume with data, if a non-empty volume is + desired. This may be any object from a non-empty API group + (non core object) or a PersistentVolumeClaim object. When + this field is specified, volume binding will only succeed + if the type of the specified object matches some installed + volume populator or dynamic provisioner. This field will replace + the functionality of the dataSource field and as such if both + fields are non-empty, they must have the same value. 
For backwards + compatibility, when namespace isn''t specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to + the same value automatically if one of them is empty and the + other is non-empty. When namespace is specified in dataSourceRef, + dataSource isn''t set to the same value and must be empty. + There are three important differences between dataSource and + dataSourceRef: * While dataSource only allows two specific + types of objects, dataSourceRef allows any non-core object, + as well as PersistentVolumeClaim objects. * While dataSource + ignores disallowed values (dropping them), dataSourceRef preserves + all values, and generates an error if a disallowed value is specified. + * While dataSource only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) Using this field + requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires + the CrossNamespaceVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being + referenced. If APIGroup is not specified, the specified + Kind must be in the core API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being + referenced Note that when a namespace is specified, a + gateway.networking.k8s.io/ReferenceGrant object is required + in the referent namespace to allow that namespace's owner + to accept the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the + volume should have. If RecoverVolumeExpansionFailure feature + is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher + than capacity recorded in the status field of the claim. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the StorageClass + required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required + by the claim. 
Value of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume + backing this claim. + type: string + type: object + type: array + domain: + default: Root + description: '(Optional) Name of the root storage domain Default: + root' + maxLength: 63 + pattern: '[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?' + type: string + erasure: + default: block-4-2 + description: Data storage topology mode For details, see https://ydb.tech/docs/en/cluster/topology + FIXME mirror-3-dc is only supported with external configuration + enum: + - mirror-3-dc + - block-4-2 + - none + type: string + hostNetwork: + description: '(Optional) Whether host network should be enabled. Default: + false' + type: boolean + image: + description: (Optional) Container image information + properties: + name: + description: 'Container image with supported YDB version. This + defaults to the version pinned to the operator and requires + a full container and tag/sha name. For example: cr.yandex/crptqonuodf51kdj7a7d/ydb:22.2.22' + type: string + pullPolicy: + description: '(Optional) PullPolicy for the image, which defaults + to IfNotPresent. Default: IfNotPresent' + type: string + pullSecret: + description: (Optional) Secret name containing the dockerconfig + to use for a registry that requires authentication. The secret + must be configured first by the user. + type: string + type: object + initContainers: + description: '(Optional) List of initialization containers belonging + to the pod. Init containers are executed in order prior to containers + being started. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. 
Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). 
This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not + specifying a port here DOES NOT prevent that port from being + exposed. Any port which is listening on the default "0.0.0.0" + address inside a container will be accessible from the network. + Modifying this array with strategic merge patch may corrupt + the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. 
+ Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. 
You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry + in pod.spec.resourceClaims of the Pod where this + field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. 
If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. 
If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + monitoring: + description: '(Optional) Monitoring sets configuration options for + YDB observability Default: ""' + properties: + enabled: + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: RelabelConfig allows dynamic rewriting of the label + set, being applied to sample before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It defines + ``-section of Prometheus configuration. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular expression + for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. 
+ type: string + type: object + type: array + required: + - enabled + type: object + nodeSelector: + additionalProperties: + type: string + description: '(Optional) NodeSelector is a selector which must be + true for the pod to fit on a node. Selector which must match a node''s + labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + nodes: + description: Number of nodes (pods) + format: int32 + type: integer + operatorConnection: + description: '(Optional) Operator connection settings Default: (not + specified)' + properties: + accessToken: + properties: + secretKeyRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + required: + - secretKeyRef + type: object + staticCredentials: + properties: + password: + properties: + secretKeyRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - secretKeyRef + type: object + username: + type: string + required: + - username + type: object + type: object + operatorSync: + default: true + description: Enables or disables operator's reconcile loop. `false` + means all the Pods are running, but the reconcile is effectively + turned off. `true` means the default state of the system, all Pods + running, operator reacts to specification change of this Storage + resource. + type: boolean + pause: + default: false + description: The state of the Storage processes. `true` means all + the Storage Pods are being killed, but the Storage resource is persisted. + `false` means the default state of the system, all Pods running. + type: boolean + priorityClassName: + description: (Optional) If specified, the pod's priorityClassName. + type: string + resources: + description: '(Optional) Container resource limits. Any container + limits can be specified. Default: (not specified)' + properties: + claims: + description: "Claims lists the names of resources, defined in + spec.resourceClaims, that are used by this container. \n This + is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims + of the Pod where this field is used. It makes that resource + available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + secrets: + description: 'Secret names that will be mounted into the well-known + directory of every storage pod. Directory: `/opt/ydb/secrets//`' + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + type: array + service: + description: '(Optional) Storage services parameter overrides Default: + (not specified)' + properties: + grpc: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + externalHost: + type: string + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + tls: + properties: + CA: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + certificate: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + type: object + interconnect: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + tls: + properties: + CA: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + certificate: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + enabled: + type: boolean + key: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + required: + - enabled + type: object + type: object + status: + properties: + additionalAnnotations: + additionalProperties: + type: string + type: object + additionalLabels: + additionalProperties: + type: string + type: object + ipFamilies: + items: + description: IPFamily represents the IP Family (IPv4 or + IPv6). This type is used to express the family of an IP + expressed by a type (e.g. service.spec.ipFamilies). + type: string + type: array + ipFamilyPolicy: + description: IPFamilyPolicy represents the dual-stack-ness + requested or required by a Service + type: string + type: object + type: object + storageRef: + description: YDB Storage reference + properties: + name: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + namespace: + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - name + type: object + tolerations: + description: (Optional) If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. 
When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: (Optional) If specified, the pod's topologySpreadConstraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. 
+ properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + matchLabelKeys: + description: MatchLabelKeys is a set of pod label keys to select + the pods over which spreading will be calculated. The keys + are used to lookup values from the incoming pod labels, those + key-value labels are ANDed with labelSelector to select the + group of existing pods over which spreading will be calculated + for the incoming pod. Keys that don't exist in the incoming + pod labels will be ignored. A null or empty list means only + match against labelSelector. + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods + in an eligible domain or zero if the number of eligible domains + is less than MinDomains. 
For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. | + zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 to become + 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation of + Skew is performed. And when the number of eligible domains + with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. If + value is nil, the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. When value + is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For + example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains + is set to 5 and pods with the same labelSelector spread as + 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), so \"global + minimum\" is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any of the + three zones, it will violate MaxSkew. 
\n This is a beta field + and requires the MinDomainsInPodTopologySpread feature gate + to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. \n + If this value is nil, the behavior is equivalent to the Honor + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node + taints when calculating pod topology spread skew. Options + are: - Honor: nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + \n If this value is nil, the behavior is equivalent to the + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each + as a "bucket", and try to put balanced number of pods into + each bucket. We define a domain as a particular instance of + a topology. Also, we define an eligible domain as a domain + whose nodes meet the requirements of nodeAffinityPolicy and + nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain of + that topology. It's a required field. 
+ type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + x-kubernetes-list-map-keys: + - topologyKey + - whenUnsatisfiable + x-kubernetes-list-type: map + version: + description: '(Optional) YDBVersion sets the explicit version of the + YDB image Default: ""' + type: string + volumes: + description: 'Additional volumes that will be mounted into the well-known + directory of every storage pod. Directory: `/opt/ydb/volumes/`. + Only `hostPath` volume type is supported for now.' + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk + resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. 
+ type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection + of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the + path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to + be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. 
+ This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated + CSI driver which will determine the default filesystem + to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. 
YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium + should back this directory. The default is "" which means + to use the node''s default medium. Must be an empty string + (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage + required for this EmptyDir volume. The size limit is also + applicable for memory medium. The maximum usage on memory + medium EmptyDir would be the minimum value between the + SizeLimit specified here and the sum of memory limits + of all containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. 
The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. When the AnyVolumeDataSource feature + gate is enabled, dataSource contents will be copied + to dataSourceRef, and dataSourceRef contents will + be copied to dataSource when dataSourceRef.namespace + is not specified. If the namespace is specified, + then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'dataSourceRef specifies the object + from which to populate the volume with data, if + a non-empty volume is desired. This may be any + object from a non-empty API group (non core object) + or a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + dataSource field and as such if both fields are + non-empty, they must have the same value. 
For + backwards compatibility, when namespace isn''t + specified in dataSourceRef, both fields (dataSource + and dataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t set to the + same value and must be empty. There are three + important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types + of objects, dataSourceRef allows any non-core + object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping + them), dataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + * While dataSource only allows local objects, + dataSourceRef allows objects in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using the + namespace field of dataSourceRef requires the + CrossNamespaceVolumeDataSource feature gate to + be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: Namespace is the namespace of resource + being referenced Note that when a namespace + is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant documentation + for details. (Alpha) This field requires the + CrossNamespaceVolumeDataSource feature gate + to be enabled. 
+ type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, + defined in spec.resourceClaims, that are used + by this container. \n This is an alpha field + and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable." + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name + of one entry in pod.spec.resourceClaims + of the Pod where this field is used. + It makes that resource available inside + a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. 
If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'storageClassName is the name of the + StorageClass required by the claim. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. 
Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends + on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' 
+ type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference + to the secret object containing sensitive information + to pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the plugin + scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as + metadata -> name on the dataset for Flocker should be + considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that + you want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that + you want to mount. If omitted, the default is to mount + by volume name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in + GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must + not contain or start with '..'. If '.' is supplied, the + volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses + an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The + portal is either an IP or ip_addr:port if the port is + other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal + is either an IP or ip_addr:port if the port is other than + default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in + VolumeMounts. Default false. 
+ type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Directories within the path are + not affected by this setting. This might be in conflict + with other options that affect the file mode, like fsGroup, + and the result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. 
May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no + group + type: string + readOnly: + description: readOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. 
+ type: boolean + registry: + description: registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'user is the rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for + a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated with + this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to + set permissions on created files by default. Must be an + octal value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. 
If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to + set permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file + to map the key to. May not be an absolute path. + May not contain the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the + pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must + be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: volumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be + a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - erasure + - nodes + - storageRef + type: object + status: + default: + state: Pending + description: StorageNodeSetStatus defines the observed state + properties: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n \ttype FooStatus struct{ \t // Represents the observations + of a foo's current state. \t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\" \t // + +patchMergeKey=type \t // +patchStrategy=merge \t // +listType=map + \t // +listMapKey=type \t Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n \t // other fields + \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedStorageGeneration: + format: int64 + type: integer + state: + type: string + required: + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] From 18e53782f7c2522c13f8efbbb7170e0f76a8f3ab Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Wed, 24 Jan 2024 00:36:06 +0300 Subject: [PATCH 02/24] remoteSpec to define region and zone --- api/v1alpha1/common_types.go | 10 ++++++++++ api/v1alpha1/databasenodeset_types.go | 2 +- api/v1alpha1/storagenodeset_types.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 25 +++++++++++++++++++++++++ 4 files changed, 37 insertions(+), 2 deletions(-) diff 
--git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 4e39a214..2b2f8b7a 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -34,3 +34,13 @@ type PodImage struct { // +optional PullSecret *string `json:"pullSecret,omitempty"` } + +type RemoteSpec struct { + // (Optional) Remote cloud region to deploy into + // +optional + Region string `json:"region,omitempty"` + + // Remote cloud zone to deploy into + // +required + Zone string `json:"zone"` +} diff --git a/api/v1alpha1/databasenodeset_types.go b/api/v1alpha1/databasenodeset_types.go index d86fd9af..699111f2 100644 --- a/api/v1alpha1/databasenodeset_types.go +++ b/api/v1alpha1/databasenodeset_types.go @@ -32,7 +32,7 @@ type DatabaseNodeSetSpecInline struct { // (Optional) Object should be reference to remote object // +optional - Remote bool `json:"remote,omitempty"` + Remote *RemoteSpec `json:"remote,omitempty"` DatabaseNodeSpec `json:",inline"` } diff --git a/api/v1alpha1/storagenodeset_types.go b/api/v1alpha1/storagenodeset_types.go index 9f2e7d13..ce2ed1fc 100644 --- a/api/v1alpha1/storagenodeset_types.go +++ b/api/v1alpha1/storagenodeset_types.go @@ -32,7 +32,7 @@ type StorageNodeSetSpecInline struct { // (Optional) Object should be reference to remote object // +optional - Remote bool `json:"remote,omitempty"` + Remote *RemoteSpec `json:"remote,omitempty"` StorageNodeSpec `json:",inline"` } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ccdaf943..834aea0d 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -395,6 +395,11 @@ func (in *DatabaseNodeSetSpec) DeepCopy() *DatabaseNodeSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DatabaseNodeSetSpecInline) DeepCopyInto(out *DatabaseNodeSetSpecInline) { *out = *in + if in.Remote != nil { + in, out := &in.Remote, &out.Remote + *out = new(RemoteSpec) + **out = **in + } in.DatabaseNodeSpec.DeepCopyInto(&out.DatabaseNodeSpec) } @@ -814,6 +819,21 @@ func (in *RemoteDatabaseNodeSetList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteSpec) DeepCopyInto(out *RemoteSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteSpec. +func (in *RemoteSpec) DeepCopy() *RemoteSpec { + if in == nil { + return nil + } + out := new(RemoteSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteStorageNodeSet) DeepCopyInto(out *RemoteStorageNodeSet) { *out = *in @@ -1271,6 +1291,11 @@ func (in *StorageNodeSetSpec) DeepCopy() *StorageNodeSetSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageNodeSetSpecInline) DeepCopyInto(out *StorageNodeSetSpecInline) { *out = *in + if in.Remote != nil { + in, out := &in.Remote, &out.Remote + *out = new(RemoteSpec) + **out = **in + } in.StorageNodeSpec.DeepCopyInto(&out.StorageNodeSpec) } From 7e8b3b2a5268729403539f54d224dfd8e9adc198 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 23 Jan 2024 19:29:17 +0300 Subject: [PATCH 03/24] processing remoteNodeSet objects in storage/database controllers --- internal/controllers/constants/constants.go | 2 + internal/controllers/database/controller.go | 23 ++++ internal/controllers/database/sync.go | 71 +++++++++--- internal/controllers/storage/controller.go | 22 ++++ internal/controllers/storage/sync.go | 74 +++++++++--- internal/resources/remotedatabasenodeset.go | 122 ++++++++++++++++++++ internal/resources/remotestoragenodeset.go | 121 +++++++++++++++++++ 7 files changed, 400 insertions(+), 35 deletions(-) create mode 100644 internal/resources/remotedatabasenodeset.go create mode 100644 internal/resources/remotestoragenodeset.go diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index 52289f4c..09a0f63e 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -8,9 +8,11 @@ const ( StoragePausedCondition = "StoragePaused" StorageInitializedCondition = "StorageReady" StorageNodeSetReadyCondition = "StorageNodeSetReady" + StorageNodeSetsSyncedCondition = "StorageNodeSetsSynced" DatabasePausedCondition = "DatabasePaused" DatabaseTenantInitializedCondition = "TenantInitialized" DatabaseNodeSetReadyCondition = "DatabaseNodeSetReady" + DatabaseNodeSetsSyncedCondition = "DatabaseNodeSetsSynced" Stop = true Continue = false diff --git a/internal/controllers/database/controller.go b/internal/controllers/database/controller.go index 092d2ab1..3b4e923e 100644 --- a/internal/controllers/database/controller.go +++ b/internal/controllers/database/controller.go 
@@ -92,6 +92,28 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { r.Recorder = mgr.GetEventRecorderFor("Database") + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &ydbv1alpha1.RemoteDatabaseNodeSet{}, + OwnerControllerKey, + func(obj client.Object) []string { + // grab the RemoteDatabaseNodeSet object, extract the owner... + remoteDatabaseNodeSet := obj.(*ydbv1alpha1.RemoteDatabaseNodeSet) + owner := metav1.GetControllerOf(remoteDatabaseNodeSet) + if owner == nil { + return nil + } + // ...make sure it's a Database... + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Database" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + }); err != nil { + return err + } + if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.DatabaseNodeSet{}, @@ -115,6 +137,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { } return controller. + Owns(&ydbv1alpha1.RemoteDatabaseNodeSet{}). Owns(&ydbv1alpha1.DatabaseNodeSet{}). Owns(&corev1.Service{}). Owns(&appsv1.StatefulSet{}). 
diff --git a/internal/controllers/database/sync.go b/internal/controllers/database/sync.go index f76d0aee..d326703c 100644 --- a/internal/controllers/database/sync.go +++ b/internal/controllers/database/sync.go @@ -461,11 +461,11 @@ func (r *Reconciler) syncNodeSetSpecInline( database *resources.DatabaseBuilder, ) (bool, ctrl.Result, error) { r.Log.Info("running step syncNodeSetSpecInline") - - databaseNodeSets := &v1alpha1.DatabaseNodeSetList{} matchingFields := client.MatchingFields{ OwnerControllerKey: database.Name, } + + databaseNodeSets := &v1alpha1.DatabaseNodeSetList{} if err := r.List(ctx, databaseNodeSets, client.InNamespace(database.Namespace), matchingFields, @@ -483,10 +483,12 @@ func (r *Reconciler) syncNodeSetSpecInline( databaseNodeSet := databaseNodeSet.DeepCopy() isFoundDatabaseNodeSetSpecInline := false for _, nodeSetSpecInline := range database.Spec.NodeSets { - databaseNodeSetName := database.Name + "-" + nodeSetSpecInline.Name - if databaseNodeSet.Name == databaseNodeSetName { - isFoundDatabaseNodeSetSpecInline = true - break + if !nodeSetSpecInline.Remote { + nodeSetName := database.Name + "-" + nodeSetSpecInline.Name + if databaseNodeSet.Name == nodeSetName { + isFoundDatabaseNodeSetSpecInline = true + break + } } } if !isFoundDatabaseNodeSetSpecInline { @@ -509,28 +511,65 @@ func (r *Reconciler) syncNodeSetSpecInline( databaseNodeSet.Name), ) } + } - oldGeneration := databaseNodeSet.Status.ObservedDatabaseGeneration - if oldGeneration != database.Generation { - databaseNodeSet.Status.ObservedDatabaseGeneration = database.Generation - if err := r.Status().Update(ctx, databaseNodeSet); err != nil { + remoteDatabaseNodeSets := &v1alpha1.RemoteDatabaseNodeSetList{} + if err := r.List(ctx, remoteDatabaseNodeSets, + client.InNamespace(database.Namespace), + matchingFields, + ); err != nil { + r.Recorder.Event( + database, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("Failed to list RemoteDatabaseNodeSets: %s", err), + ) + 
return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + for _, remoteDatabaseNodeSet := range remoteDatabaseNodeSets.Items { + remoteDatabaseNodeSet := remoteDatabaseNodeSet.DeepCopy() + isFoundRemoteDatabaseNodeSetSpecInline := false + for _, nodeSetSpecInline := range database.Spec.NodeSets { + if nodeSetSpecInline.Remote { + nodeSetName := database.Name + "-" + nodeSetSpecInline.Name + if remoteDatabaseNodeSet.Name == nodeSetName { + isFoundRemoteDatabaseNodeSetSpecInline = true + break + } + } + } + + if !isFoundRemoteDatabaseNodeSetSpecInline { + if err := r.Delete(ctx, remoteDatabaseNodeSet); err != nil { r.Recorder.Event( - databaseNodeSet, + database, corev1.EventTypeWarning, - "ControllerError", - fmt.Sprintf("Failed setting status: %s", err), + "ProvisioningFailed", + fmt.Sprintf("Failed to delete RemoteDatabaseNodeSet: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } r.Recorder.Event( - databaseNodeSet, + database, corev1.EventTypeNormal, - "StatusChanged", - fmt.Sprintf("DatabaseNodeSet updated observedStorageGeneration from %d to %d", oldGeneration, database.Generation), + "Syncing", + fmt.Sprintf("Resource: %s, Namespace: %s, Name: %s, deleted", + reflect.TypeOf(remoteDatabaseNodeSet), + remoteDatabaseNodeSet.Namespace, + remoteDatabaseNodeSet.Name), ) } } + meta.SetStatusCondition(&database.Status.Conditions, metav1.Condition{ + Type: DatabaseNodeSetsSyncedCondition, + Status: "True", + ObservedGeneration: database.Generation, + Reason: ReasonCompleted, + Message: "Synced (Remote)DatabaseNodeSets with Database spec", + }) + r.Log.Info("syncNodeSetSpecInline complete") return Continue, ctrl.Result{Requeue: false}, nil } diff --git a/internal/controllers/storage/controller.go b/internal/controllers/storage/controller.go index c8f168c0..9c4f0f87 100644 --- a/internal/controllers/storage/controller.go +++ b/internal/controllers/storage/controller.go @@ -102,6 +102,28 @@ func (r *Reconciler) 
SetupWithManager(mgr ctrl.Manager) error { Owns(&monitoringv1.ServiceMonitor{}) } + if err := mgr.GetFieldIndexer().IndexField( + context.Background(), + &ydbv1alpha1.RemoteStorageNodeSet{}, + OwnerControllerKey, + func(obj client.Object) []string { + // grab the RemoteStorageNodeSet object, extract the owner... + remoteStorageNodeSet := obj.(*ydbv1alpha1.RemoteStorageNodeSet) + owner := metav1.GetControllerOf(remoteStorageNodeSet) + if owner == nil { + return nil + } + // ...make sure it's a Storage... + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Storage" { + return nil + } + + // ...and if so, return it + return []string{owner.Name} + }); err != nil { + return err + } + if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.StorageNodeSet{}, diff --git a/internal/controllers/storage/sync.go b/internal/controllers/storage/sync.go index bd926fa3..f246c4b7 100644 --- a/internal/controllers/storage/sync.go +++ b/internal/controllers/storage/sync.go @@ -294,11 +294,11 @@ func (r *Reconciler) syncNodeSetSpecInline( storage *resources.StorageClusterBuilder, ) (bool, ctrl.Result, error) { r.Log.Info("running step syncNodeSetSpecInline") - - storageNodeSets := &ydbv1alpha1.StorageNodeSetList{} matchingFields := client.MatchingFields{ OwnerControllerKey: storage.Name, } + + storageNodeSets := &ydbv1alpha1.StorageNodeSetList{} if err := r.List(ctx, storageNodeSets, client.InNamespace(storage.Namespace), matchingFields, @@ -316,10 +316,12 @@ func (r *Reconciler) syncNodeSetSpecInline( storageNodeSet := storageNodeSet.DeepCopy() isFoundStorageNodeSetSpecInline := false for _, nodeSetSpecInline := range storage.Spec.NodeSets { - nodeSetName := storage.Name + "-" + nodeSetSpecInline.Name - if storageNodeSet.Name == nodeSetName { - isFoundStorageNodeSetSpecInline = true - break + if !nodeSetSpecInline.Remote { + nodeSetName := storage.Name + "-" + nodeSetSpecInline.Name + if storageNodeSet.Name == nodeSetName { + 
isFoundStorageNodeSetSpecInline = true + break + } } } @@ -343,31 +345,65 @@ func (r *Reconciler) syncNodeSetSpecInline( storageNodeSet.Name), ) } + } - oldGeneration := storageNodeSet.Status.ObservedStorageGeneration - if oldGeneration != storage.Generation { - storageNodeSet.Status.ObservedStorageGeneration = storage.Generation - if err := r.Status().Update(ctx, storageNodeSet); err != nil { + remoteStorageNodeSets := &ydbv1alpha1.RemoteStorageNodeSetList{} + if err := r.List(ctx, remoteStorageNodeSets, + client.InNamespace(storage.Namespace), + matchingFields, + ); err != nil { + r.Recorder.Event( + storage, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("Failed to list RemoteStorageNodeSets: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + for _, remoteStorageNodeSet := range remoteStorageNodeSets.Items { + remoteStorageNodeSet := remoteStorageNodeSet.DeepCopy() + isFoundRemoteStorageNodeSetSpecInline := false + for _, nodeSetSpecInline := range storage.Spec.NodeSets { + if nodeSetSpecInline.Remote { + nodeSetName := storage.Name + "-" + nodeSetSpecInline.Name + if remoteStorageNodeSet.Name == nodeSetName { + isFoundRemoteStorageNodeSetSpecInline = true + break + } + } + } + + if !isFoundRemoteStorageNodeSetSpecInline { + if err := r.Delete(ctx, remoteStorageNodeSet); err != nil { r.Recorder.Event( - storageNodeSet, + storage, corev1.EventTypeWarning, - "ControllerError", - fmt.Sprintf("Failed setting status: %s", err), + "ProvisioningFailed", + fmt.Sprintf("Failed to delete RemoteStorageNodeSet: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } r.Recorder.Event( - storageNodeSet, + storage, corev1.EventTypeNormal, - "StatusChanged", - fmt.Sprintf( - "StorageNodeSet updated observedStorageGeneration from %d to %d", - oldGeneration, - storage.Generation), + "Syncing", + fmt.Sprintf("Resource: %s, Namespace: %s, Name: %s, deleted", + reflect.TypeOf(remoteStorageNodeSet), + 
remoteStorageNodeSet.Namespace, + remoteStorageNodeSet.Name), ) } } + meta.SetStatusCondition(&storage.Status.Conditions, metav1.Condition{ + Type: StorageNodeSetsSyncedCondition, + Status: "True", + ObservedGeneration: storage.Generation, + Reason: ReasonCompleted, + Message: "Synced (Remote)StorageNodeSets with Storage spec", + }) + r.Log.Info("syncNodeSetSpecInline complete") return Continue, ctrl.Result{Requeue: false}, nil } diff --git a/internal/resources/remotedatabasenodeset.go b/internal/resources/remotedatabasenodeset.go new file mode 100644 index 00000000..a23690ed --- /dev/null +++ b/internal/resources/remotedatabasenodeset.go @@ -0,0 +1,122 @@ +package resources + +import ( + "errors" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck +) + +type RemoteDatabaseNodeSetBuilder struct { + client.Object + + Name string + Labels map[string]string + + DatabaseNodeSetSpec api.DatabaseNodeSetSpec +} + +type RemoteDatabaseNodeSetResource struct { + *api.RemoteDatabaseNodeSet +} + +func (b *RemoteDatabaseNodeSetBuilder) Build(obj client.Object) error { + dns, ok := obj.(*api.RemoteDatabaseNodeSet) + if !ok { + return errors.New("failed to cast to RemoteDatabaseNodeSet object") + } + + if dns.ObjectMeta.Name == "" { + dns.ObjectMeta.Name = b.Name + } + dns.ObjectMeta.Namespace = b.GetNamespace() + + dns.ObjectMeta.Labels = b.Labels + dns.Spec = b.DatabaseNodeSetSpec + + return nil +} + +func (b *RemoteDatabaseNodeSetBuilder) Placeholder(cr client.Object) client.Object { + return &api.DatabaseNodeSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.Name, + Namespace: cr.GetNamespace(), + }, + } +} + +func (b *RemoteDatabaseNodeSetResource) 
GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { + database := b.recastRemoteDatabaseNodeSet() + + var resourceBuilders []ResourceBuilder + resourceBuilders = append(resourceBuilders, + &DatabaseStatefulSetBuilder{ + Database: database.DeepCopy(), + RestConfig: restConfig, + + Name: b.Name, + Labels: b.Labels, + StorageEndpoint: b.Spec.StorageEndpoint, + }, + &ConfigMapBuilder{ + Object: b, + + Name: b.Name, + Data: map[string]string{ + api.ConfigFileName: b.Spec.Configuration, + }, + Labels: b.Labels, + }, + ) + return resourceBuilders +} + +func NewRemoteDatabaseNodeSet(remoteDatabaseNodeSet *api.RemoteDatabaseNodeSet) RemoteDatabaseNodeSetResource { + crRemoteDatabaseNodeSet := remoteDatabaseNodeSet.DeepCopy() + + return RemoteDatabaseNodeSetResource{RemoteDatabaseNodeSet: crRemoteDatabaseNodeSet} +} + +func (b *RemoteDatabaseNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl.Result, error) { + if b.Status.Conditions == nil { + b.Status.Conditions = []metav1.Condition{} + + if b.Spec.Pause { + meta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ + Type: DatabasePausedCondition, + Status: "False", + Reason: ReasonInProgress, + Message: "Transitioning RemoteDatabaseNodeSet to Paused state", + }) + + return Stop, ctrl.Result{RequeueAfter: StatusUpdateRequeueDelay}, nil + } + } + + return Continue, ctrl.Result{}, nil +} + +func (b *RemoteDatabaseNodeSetResource) Unwrap() *api.RemoteDatabaseNodeSet { + return b.DeepCopy() +} + +func (b *RemoteDatabaseNodeSetResource) recastRemoteDatabaseNodeSet() *api.Database { + return &api.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.RemoteDatabaseNodeSet.Spec.DatabaseRef.Name, + Namespace: b.RemoteDatabaseNodeSet.Spec.DatabaseRef.Namespace, + Labels: b.RemoteDatabaseNodeSet.Labels, + }, + Spec: api.DatabaseSpec{ + DatabaseClusterSpec: b.Spec.DatabaseClusterSpec, + DatabaseNodeSpec: b.Spec.DatabaseNodeSpec, + }, + } +} diff --git a/internal/resources/remotestoragenodeset.go 
b/internal/resources/remotestoragenodeset.go new file mode 100644 index 00000000..6a5a04c5 --- /dev/null +++ b/internal/resources/remotestoragenodeset.go @@ -0,0 +1,121 @@ +package resources + +import ( + "errors" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck +) + +type RemoteStorageNodeSetBuilder struct { + client.Object + + Name string + Labels map[string]string + + StorageNodeSetSpec api.StorageNodeSetSpec +} + +type RemoteStorageNodeSetResource struct { + *api.RemoteStorageNodeSet +} + +func (b *RemoteStorageNodeSetBuilder) Build(obj client.Object) error { + dns, ok := obj.(*api.RemoteStorageNodeSet) + if !ok { + return errors.New("failed to cast to RemoteStorageNodeSet object") + } + + if dns.ObjectMeta.Name == "" { + dns.ObjectMeta.Name = b.Name + } + dns.ObjectMeta.Namespace = b.GetNamespace() + + dns.ObjectMeta.Labels = b.Labels + dns.Spec = b.StorageNodeSetSpec + + return nil +} + +func (b *RemoteStorageNodeSetBuilder) Placeholder(cr client.Object) client.Object { + return &api.RemoteStorageNodeSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.Name, + Namespace: cr.GetNamespace(), + }, + } +} + +func (b *RemoteStorageNodeSetResource) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { + Storage := b.recastRemoteStorageNodeSet() + + var resourceBuilders []ResourceBuilder + resourceBuilders = append(resourceBuilders, + &StorageStatefulSetBuilder{ + Storage: Storage.DeepCopy(), + RestConfig: restConfig, + + Name: b.Name, + Labels: b.Labels, + }, + &ConfigMapBuilder{ + Object: b, + + Name: b.Name, + Data: map[string]string{ + api.ConfigFileName: b.Spec.Configuration, + }, + Labels: b.Labels, + }, + ) + return 
resourceBuilders +} + +func NewRemoteStorageNodeSet(remoteStorageNodeSet *api.RemoteStorageNodeSet) RemoteStorageNodeSetResource { + crRemoteStorageNodeSet := remoteStorageNodeSet.DeepCopy() + + return RemoteStorageNodeSetResource{RemoteStorageNodeSet: crRemoteStorageNodeSet} +} + +func (b *RemoteStorageNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl.Result, error) { + if b.Status.Conditions == nil { + b.Status.Conditions = []metav1.Condition{} + + if b.Spec.Pause { + meta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ + Type: StoragePausedCondition, + Status: "False", + Reason: ReasonInProgress, + Message: "Transitioning RemoteStorageNodeSet to Paused state", + }) + + return Stop, ctrl.Result{RequeueAfter: StatusUpdateRequeueDelay}, nil + } + } + + return Continue, ctrl.Result{}, nil +} + +func (b *RemoteStorageNodeSetResource) Unwrap() *api.RemoteStorageNodeSet { + return b.DeepCopy() +} + +func (b *RemoteStorageNodeSetResource) recastRemoteStorageNodeSet() *api.Storage { + return &api.Storage{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.RemoteStorageNodeSet.Spec.StorageRef.Name, + Namespace: b.RemoteStorageNodeSet.Spec.StorageRef.Namespace, + Labels: b.RemoteStorageNodeSet.Labels, + }, + Spec: api.StorageSpec{ + StorageClusterSpec: b.Spec.StorageClusterSpec, + StorageNodeSpec: b.Spec.StorageNodeSpec, + }, + } +} From 3c27d7a2c88451bdf541743f09f9f212da5c80cc Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Wed, 24 Jan 2024 11:59:40 +0300 Subject: [PATCH 04/24] append remoteNodeSet builders in storage/database resources --- internal/controllers/database/sync.go | 4 +-- internal/controllers/storage/sync.go | 4 +-- internal/labels/label.go | 6 ++-- internal/resources/database.go | 46 ++++++++++++++++++++------- internal/resources/storage.go | 45 +++++++++++++++++++------- 5 files changed, 77 insertions(+), 28 deletions(-) diff --git a/internal/controllers/database/sync.go b/internal/controllers/database/sync.go index d326703c..7a213ed3 100644 
--- a/internal/controllers/database/sync.go +++ b/internal/controllers/database/sync.go @@ -483,7 +483,7 @@ func (r *Reconciler) syncNodeSetSpecInline( databaseNodeSet := databaseNodeSet.DeepCopy() isFoundDatabaseNodeSetSpecInline := false for _, nodeSetSpecInline := range database.Spec.NodeSets { - if !nodeSetSpecInline.Remote { + if nodeSetSpecInline.Remote == nil { nodeSetName := database.Name + "-" + nodeSetSpecInline.Name if databaseNodeSet.Name == nodeSetName { isFoundDatabaseNodeSetSpecInline = true @@ -531,7 +531,7 @@ func (r *Reconciler) syncNodeSetSpecInline( remoteDatabaseNodeSet := remoteDatabaseNodeSet.DeepCopy() isFoundRemoteDatabaseNodeSetSpecInline := false for _, nodeSetSpecInline := range database.Spec.NodeSets { - if nodeSetSpecInline.Remote { + if nodeSetSpecInline.Remote != nil { nodeSetName := database.Name + "-" + nodeSetSpecInline.Name if remoteDatabaseNodeSet.Name == nodeSetName { isFoundRemoteDatabaseNodeSetSpecInline = true diff --git a/internal/controllers/storage/sync.go b/internal/controllers/storage/sync.go index f246c4b7..64f49166 100644 --- a/internal/controllers/storage/sync.go +++ b/internal/controllers/storage/sync.go @@ -316,7 +316,7 @@ func (r *Reconciler) syncNodeSetSpecInline( storageNodeSet := storageNodeSet.DeepCopy() isFoundStorageNodeSetSpecInline := false for _, nodeSetSpecInline := range storage.Spec.NodeSets { - if !nodeSetSpecInline.Remote { + if nodeSetSpecInline.Remote == nil { nodeSetName := storage.Name + "-" + nodeSetSpecInline.Name if storageNodeSet.Name == nodeSetName { isFoundStorageNodeSetSpecInline = true @@ -365,7 +365,7 @@ func (r *Reconciler) syncNodeSetSpecInline( remoteStorageNodeSet := remoteStorageNodeSet.DeepCopy() isFoundRemoteStorageNodeSetSpecInline := false for _, nodeSetSpecInline := range storage.Spec.NodeSets { - if nodeSetSpecInline.Remote { + if nodeSetSpecInline.Remote != nil { nodeSetName := storage.Name + "-" + nodeSetSpecInline.Name if remoteStorageNodeSet.Name == nodeSetName { 
isFoundRemoteStorageNodeSetSpecInline = true diff --git a/internal/labels/label.go b/internal/labels/label.go index 25422fa5..46417afd 100644 --- a/internal/labels/label.go +++ b/internal/labels/label.go @@ -16,13 +16,15 @@ const ( PartOfKey = "app.kubernetes.io/part-of" // ManagedByKey The tool being used to manage the operation of an application ManagedByKey = "app.kubernetes.io/managed-by" + // TopologyZoneKey A zone represents a logical failure domain + TopologyZoneKey = "topology.kubernetes.io/zone" + // TopologyRegionKey A region represents a larger domain, made up of one or more zones + TopologyRegionKey = "topology.kubernetes.io/region" // ServiceComponent The specialization of a Service resource ServiceComponent = "ydb.tech/service-for" - // StorageNodeSetComponent The specialization of a StorageNodeSet resource StorageNodeSetComponent = "ydb.tech/storage-nodeset" - // DatabaseNodeSetComponent The specialization of a DatabaseNodeSet resource DatabaseNodeSetComponent = "ydb.tech/database-nodeset" diff --git a/internal/resources/database.go b/internal/resources/database.go index 2e0e350c..0494a43a 100644 --- a/internal/resources/database.go +++ b/internal/resources/database.go @@ -196,17 +196,41 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.DatabaseNodeSetComponent: nodeSetSpecInline.Name}) - optionalBuilders = append( - optionalBuilders, - &DatabaseNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - DatabaseNodeSetSpec: b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()), - }, - ) + databaseNodeSetSpec := b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) + + if nodeSetSpecInline.Remote != nil { + if nodeSetSpecInline.Remote.Region != "" { + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + 
labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, + }) + } + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + }) + optionalBuilders = append( + optionalBuilders, + &RemoteDatabaseNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + DatabaseNodeSetSpec: databaseNodeSetSpec, + }, + ) + } else { + optionalBuilders = append( + optionalBuilders, + &DatabaseNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + DatabaseNodeSetSpec: databaseNodeSetSpec, + }, + ) + } } } diff --git a/internal/resources/storage.go b/internal/resources/storage.go index e8c3e0aa..ff445d6a 100644 --- a/internal/resources/storage.go +++ b/internal/resources/storage.go @@ -107,17 +107,40 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.StorageNodeSetComponent: nodeSetSpecInline.Name}) - optionalBuilders = append( - optionalBuilders, - &StorageNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - StorageNodeSetSpec: b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()), - }, - ) + storageNodeSetSpec := b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) + if nodeSetSpecInline.Remote != nil { + if nodeSetSpecInline.Remote.Region != "" { + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, + }) + } + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + }) + optionalBuilders = append( + optionalBuilders, + &RemoteStorageNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + StorageNodeSetSpec: 
storageNodeSetSpec, + }, + ) + } else { + optionalBuilders = append( + optionalBuilders, + &StorageNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + StorageNodeSetSpec: storageNodeSetSpec, + }, + ) + } } } From a29603f0f4a341c9e726f7d2af19ba9ac0a30336 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Fri, 26 Jan 2024 12:35:07 +0300 Subject: [PATCH 05/24] fix resourceBuilders for remote resources --- internal/controllers/storage/controller.go | 1 + internal/resources/remotedatabasenodeset.go | 38 +++------------------ internal/resources/remotestoragenodeset.go | 37 +++----------------- 3 files changed, 9 insertions(+), 67 deletions(-) diff --git a/internal/controllers/storage/controller.go b/internal/controllers/storage/controller.go index 9c4f0f87..1157741f 100644 --- a/internal/controllers/storage/controller.go +++ b/internal/controllers/storage/controller.go @@ -147,6 +147,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { } return controller. + Owns(&ydbv1alpha1.RemoteStorageNodeSet{}). Owns(&ydbv1alpha1.StorageNodeSet{}). Owns(&corev1.Service{}). Owns(&appsv1.StatefulSet{}). 
diff --git a/internal/resources/remotedatabasenodeset.go b/internal/resources/remotedatabasenodeset.go index a23690ed..8984879c 100644 --- a/internal/resources/remotedatabasenodeset.go +++ b/internal/resources/remotedatabasenodeset.go @@ -53,26 +53,14 @@ func (b *RemoteDatabaseNodeSetBuilder) Placeholder(cr client.Object) client.Obje } func (b *RemoteDatabaseNodeSetResource) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { - database := b.recastRemoteDatabaseNodeSet() - var resourceBuilders []ResourceBuilder resourceBuilders = append(resourceBuilders, - &DatabaseStatefulSetBuilder{ - Database: database.DeepCopy(), - RestConfig: restConfig, - - Name: b.Name, - Labels: b.Labels, - StorageEndpoint: b.Spec.StorageEndpoint, - }, - &ConfigMapBuilder{ + &DatabaseNodeSetBuilder{ Object: b, - Name: b.Name, - Data: map[string]string{ - api.ConfigFileName: b.Spec.Configuration, - }, - Labels: b.Labels, + Name: b.Name, + Labels: b.Labels, + DatabaseNodeSetSpec: b.Spec, }, ) return resourceBuilders @@ -102,21 +90,3 @@ func (b *RemoteDatabaseNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl. 
return Continue, ctrl.Result{}, nil } - -func (b *RemoteDatabaseNodeSetResource) Unwrap() *api.RemoteDatabaseNodeSet { - return b.DeepCopy() -} - -func (b *RemoteDatabaseNodeSetResource) recastRemoteDatabaseNodeSet() *api.Database { - return &api.Database{ - ObjectMeta: metav1.ObjectMeta{ - Name: b.RemoteDatabaseNodeSet.Spec.DatabaseRef.Name, - Namespace: b.RemoteDatabaseNodeSet.Spec.DatabaseRef.Namespace, - Labels: b.RemoteDatabaseNodeSet.Labels, - }, - Spec: api.DatabaseSpec{ - DatabaseClusterSpec: b.Spec.DatabaseClusterSpec, - DatabaseNodeSpec: b.Spec.DatabaseNodeSpec, - }, - } -} diff --git a/internal/resources/remotestoragenodeset.go b/internal/resources/remotestoragenodeset.go index 6a5a04c5..a7a5fc99 100644 --- a/internal/resources/remotestoragenodeset.go +++ b/internal/resources/remotestoragenodeset.go @@ -53,25 +53,14 @@ func (b *RemoteStorageNodeSetBuilder) Placeholder(cr client.Object) client.Objec } func (b *RemoteStorageNodeSetResource) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { - Storage := b.recastRemoteStorageNodeSet() - var resourceBuilders []ResourceBuilder resourceBuilders = append(resourceBuilders, - &StorageStatefulSetBuilder{ - Storage: Storage.DeepCopy(), - RestConfig: restConfig, - - Name: b.Name, - Labels: b.Labels, - }, - &ConfigMapBuilder{ + &StorageNodeSetBuilder{ Object: b, - Name: b.Name, - Data: map[string]string{ - api.ConfigFileName: b.Spec.Configuration, - }, - Labels: b.Labels, + Name: b.Name, + Labels: b.Labels, + StorageNodeSetSpec: b.Spec, }, ) return resourceBuilders @@ -101,21 +90,3 @@ func (b *RemoteStorageNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl.R return Continue, ctrl.Result{}, nil } - -func (b *RemoteStorageNodeSetResource) Unwrap() *api.RemoteStorageNodeSet { - return b.DeepCopy() -} - -func (b *RemoteStorageNodeSetResource) recastRemoteStorageNodeSet() *api.Storage { - return &api.Storage{ - ObjectMeta: metav1.ObjectMeta{ - Name: b.RemoteStorageNodeSet.Spec.StorageRef.Name, - 
Namespace: b.RemoteStorageNodeSet.Spec.StorageRef.Namespace, - Labels: b.RemoteStorageNodeSet.Labels, - }, - Spec: api.StorageSpec{ - StorageClusterSpec: b.Spec.StorageClusterSpec, - StorageNodeSpec: b.Spec.StorageNodeSpec, - }, - } -} From 030edf2b705217f17a26dc7ce553831db7148b59 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 6 Feb 2024 13:46:47 +0300 Subject: [PATCH 06/24] fix nested if --- internal/resources/database.go | 84 +++++++++++++++++--------------- internal/resources/storage.go | 88 ++++++++++++++++++---------------- 2 files changed, 94 insertions(+), 78 deletions(-) diff --git a/internal/resources/database.go b/internal/resources/database.go index 0494a43a..4b640b91 100644 --- a/internal/resources/database.go +++ b/internal/resources/database.go @@ -191,50 +191,58 @@ func (b *DatabaseBuilder) GetResourceBuilders(restConfig *rest.Config) []Resourc }, ) } else { - for _, nodeSetSpecInline := range b.Spec.NodeSets { - nodeSetLabels := databaseLabels.Copy() - nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) - nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.DatabaseNodeSetComponent: nodeSetSpecInline.Name}) - - databaseNodeSetSpec := b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) - - if nodeSetSpecInline.Remote != nil { - if nodeSetSpecInline.Remote.Region != "" { - nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, - }) - } + optionalBuilders = append(optionalBuilders, b.getNodeSetBuilders(databaseLabels)...) 
+ } + + return optionalBuilders +} + +func (b *DatabaseBuilder) getNodeSetBuilders(databaseLabels labels.Labels) []ResourceBuilder { + var nodeSetBuilders []ResourceBuilder + + for _, nodeSetSpecInline := range b.Spec.NodeSets { + nodeSetLabels := databaseLabels.Copy() + nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) + nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.DatabaseNodeSetComponent: nodeSetSpecInline.Name}) + + databaseNodeSetSpec := b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) + + if nodeSetSpecInline.Remote != nil { + if nodeSetSpecInline.Remote.Region != "" { nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, }) - optionalBuilders = append( - optionalBuilders, - &RemoteDatabaseNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - DatabaseNodeSetSpec: databaseNodeSetSpec, - }, - ) - } else { - optionalBuilders = append( - optionalBuilders, - &DatabaseNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - DatabaseNodeSetSpec: databaseNodeSetSpec, - }, - ) } + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + }) + nodeSetBuilders = append( + nodeSetBuilders, + &RemoteDatabaseNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + DatabaseNodeSetSpec: databaseNodeSetSpec, + }, + ) + } else { + nodeSetBuilders = append( + nodeSetBuilders, + &DatabaseNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + DatabaseNodeSetSpec: databaseNodeSetSpec, + }, + ) } } - return optionalBuilders + return nodeSetBuilders } func (b *DatabaseBuilder) recastDatabaseNodeSetSpecInline(nodeSetSpecInline *api.DatabaseNodeSetSpecInline) 
api.DatabaseNodeSetSpec { diff --git a/internal/resources/storage.go b/internal/resources/storage.go index ff445d6a..8442c2e2 100644 --- a/internal/resources/storage.go +++ b/internal/resources/storage.go @@ -102,46 +102,7 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R }, ) } else { - for _, nodeSetSpecInline := range b.Spec.NodeSets { - nodeSetLabels := storageLabels.Copy() - nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) - nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.StorageNodeSetComponent: nodeSetSpecInline.Name}) - - storageNodeSetSpec := b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) - if nodeSetSpecInline.Remote != nil { - if nodeSetSpecInline.Remote.Region != "" { - nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, - }) - } - nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, - }) - optionalBuilders = append( - optionalBuilders, - &RemoteStorageNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - StorageNodeSetSpec: storageNodeSetSpec, - }, - ) - } else { - optionalBuilders = append( - optionalBuilders, - &StorageNodeSetBuilder{ - Object: b, - - Name: b.Name + "-" + nodeSetSpecInline.Name, - Labels: nodeSetLabels, - - StorageNodeSetSpec: storageNodeSetSpec, - }, - ) - } - } + optionalBuilders = append(optionalBuilders, b.getNodeSetBuilders(storageLabels)...) 
} return append( @@ -189,6 +150,53 @@ func (b *StorageClusterBuilder) GetResourceBuilders(restConfig *rest.Config) []R ) } +func (b *StorageClusterBuilder) getNodeSetBuilders(storageLabels labels.Labels) []ResourceBuilder { + var nodeSetBuilders []ResourceBuilder + + for _, nodeSetSpecInline := range b.Spec.NodeSets { + nodeSetLabels := storageLabels.Copy() + nodeSetLabels = nodeSetLabels.Merge(nodeSetSpecInline.AdditionalLabels) + nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.StorageNodeSetComponent: nodeSetSpecInline.Name}) + + storageNodeSetSpec := b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) + if nodeSetSpecInline.Remote != nil { + if nodeSetSpecInline.Remote.Region != "" { + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, + }) + } + nodeSetLabels = nodeSetLabels.Merge(map[string]string{ + labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + }) + nodeSetBuilders = append( + nodeSetBuilders, + &RemoteStorageNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + StorageNodeSetSpec: storageNodeSetSpec, + }, + ) + } else { + nodeSetBuilders = append( + nodeSetBuilders, + &StorageNodeSetBuilder{ + Object: b, + + Name: b.Name + "-" + nodeSetSpecInline.Name, + Labels: nodeSetLabels, + + StorageNodeSetSpec: storageNodeSetSpec, + }, + ) + } + } + + return nodeSetBuilders +} + func (b *StorageClusterBuilder) recastStorageNodeSetSpecInline(nodeSetSpecInline *api.StorageNodeSetSpecInline) api.StorageNodeSetSpec { nodeSetSpec := api.StorageNodeSetSpec{} From f402f0e83e9ced7abd6034488a8bfb1649645fe9 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 6 Feb 2024 13:55:03 +0300 Subject: [PATCH 07/24] update rbac for remoteNodeSet objects --- deploy/ydb-operator/Chart.yaml | 4 ++-- deploy/ydb-operator/crds/database.yaml | 11 ++++++++++- deploy/ydb-operator/crds/storage.yaml | 11 ++++++++++- 
deploy/ydb-operator/templates/rbac-operator.yaml | 6 ++++++ 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/deploy/ydb-operator/Chart.yaml b/deploy/ydb-operator/Chart.yaml index 2c4cc5ac..43cd9e8d 100644 --- a/deploy/ydb-operator/Chart.yaml +++ b/deploy/ydb-operator/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.4.38 +version: 0.4.39 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.4.38" +appVersion: "0.4.39" diff --git a/deploy/ydb-operator/crds/database.yaml b/deploy/ydb-operator/crds/database.yaml index 94cd8b9b..1bedf321 100644 --- a/deploy/ydb-operator/crds/database.yaml +++ b/deploy/ydb-operator/crds/database.yaml @@ -3175,7 +3175,16 @@ spec: remote: description: (Optional) Object should be reference to remote object - type: boolean + properties: + region: + description: (Optional) Remote cloud region to deploy into + type: string + zone: + description: Remote cloud zone to deploy into + type: string + required: + - zone + type: object resources: description: (Optional) Database storage and compute resources properties: diff --git a/deploy/ydb-operator/crds/storage.yaml b/deploy/ydb-operator/crds/storage.yaml index 9ecfb4b9..cb9f9d30 100644 --- a/deploy/ydb-operator/crds/storage.yaml +++ b/deploy/ydb-operator/crds/storage.yaml @@ -3557,7 +3557,16 @@ spec: remote: description: (Optional) Object should be reference to remote object - type: boolean + properties: + region: + description: (Optional) Remote cloud region to deploy 
into + type: string + zone: + description: Remote cloud zone to deploy into + type: string + required: + - zone + type: object resources: description: '(Optional) Container resource limits. Any container limits can be specified. Default: (not specified)' diff --git a/deploy/ydb-operator/templates/rbac-operator.yaml b/deploy/ydb-operator/templates/rbac-operator.yaml index 99313d1f..06f0fbd9 100644 --- a/deploy/ydb-operator/templates/rbac-operator.yaml +++ b/deploy/ydb-operator/templates/rbac-operator.yaml @@ -179,7 +179,9 @@ rules: - ydb.tech resources: - databasenodesets + - remotedatabasenodesets - storagenodesets + - remotestoragenodesets verbs: - create - delete @@ -192,14 +194,18 @@ rules: - ydb.tech resources: - databasenodesets/finalizers + - remotedatabasenodesets/finalizers - storagenodesets/finalizers + - remotestoragenodesets/finalizers verbs: - update - apiGroups: - ydb.tech resources: - databasenodesets/status + - remotedatabasenodesets/status - storagenodesets/status + - remotestoragenodesets/status verbs: - get - patch From c9075521693c334c6a2582705e4ee95f9f9c5921 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 6 Feb 2024 19:04:46 +0300 Subject: [PATCH 08/24] fixes tests --- api/v1alpha1/databasenodeset_types.go | 5 ++--- api/v1alpha1/storagenodeset_types.go | 5 ++--- deploy/ydb-operator/crds/databasenodeset.yaml | 3 --- .../crds/remotedatabasenodeset.yaml | 3 --- .../crds/remotestoragenodeset.yaml | 3 --- deploy/ydb-operator/crds/storagenodeset.yaml | 3 --- e2e/tests/smoke_test.go | 20 ------------------- go.mod | 1 + go.sum | 2 ++ internal/controllers/constants/constants.go | 3 +++ internal/controllers/database/controller.go | 6 +++--- internal/controllers/storage/controller.go | 8 ++++---- 12 files changed, 17 insertions(+), 45 deletions(-) diff --git a/api/v1alpha1/databasenodeset_types.go b/api/v1alpha1/databasenodeset_types.go index 699111f2..ccaf2024 100644 --- a/api/v1alpha1/databasenodeset_types.go +++ 
b/api/v1alpha1/databasenodeset_types.go @@ -19,9 +19,8 @@ type DatabaseNodeSetSpec struct { // DatabaseNodeSetStatus defines the observed state type DatabaseNodeSetStatus struct { - State constants.ClusterState `json:"state"` - Conditions []metav1.Condition `json:"conditions,omitempty"` - ObservedDatabaseGeneration int64 `json:"observedDatabaseGeneration,omitempty"` + State constants.ClusterState `json:"state"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // DatabaseNodeSetSpecInline describes an group nodes object inside parent object diff --git a/api/v1alpha1/storagenodeset_types.go b/api/v1alpha1/storagenodeset_types.go index ce2ed1fc..7af6165c 100644 --- a/api/v1alpha1/storagenodeset_types.go +++ b/api/v1alpha1/storagenodeset_types.go @@ -19,9 +19,8 @@ type StorageNodeSetSpec struct { // StorageNodeSetStatus defines the observed state type StorageNodeSetStatus struct { - State constants.ClusterState `json:"state"` - Conditions []metav1.Condition `json:"conditions,omitempty"` - ObservedStorageGeneration int64 `json:"observedStorageGeneration,omitempty"` + State constants.ClusterState `json:"state"` + Conditions []metav1.Condition `json:"conditions,omitempty"` } // StorageNodeSetSpecInline describes an group nodes object inside parent object diff --git a/deploy/ydb-operator/crds/databasenodeset.yaml b/deploy/ydb-operator/crds/databasenodeset.yaml index f2c3330f..f2bcc4e2 100644 --- a/deploy/ydb-operator/crds/databasenodeset.yaml +++ b/deploy/ydb-operator/crds/databasenodeset.yaml @@ -4631,9 +4631,6 @@ spec: - type type: object type: array - observedDatabaseGeneration: - format: int64 - type: integer state: type: string required: diff --git a/deploy/ydb-operator/crds/remotedatabasenodeset.yaml b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml index 23b70cce..8acaaed4 100644 --- a/deploy/ydb-operator/crds/remotedatabasenodeset.yaml +++ b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml @@ -4632,9 +4632,6 @@ spec: - type type: object type: 
array - observedDatabaseGeneration: - format: int64 - type: integer state: type: string required: diff --git a/deploy/ydb-operator/crds/remotestoragenodeset.yaml b/deploy/ydb-operator/crds/remotestoragenodeset.yaml index 75eda050..d3785dea 100644 --- a/deploy/ydb-operator/crds/remotestoragenodeset.yaml +++ b/deploy/ydb-operator/crds/remotestoragenodeset.yaml @@ -4625,9 +4625,6 @@ spec: - type type: object type: array - observedStorageGeneration: - format: int64 - type: integer state: type: string required: diff --git a/deploy/ydb-operator/crds/storagenodeset.yaml b/deploy/ydb-operator/crds/storagenodeset.yaml index ecdc1d7d..dc30ed0e 100644 --- a/deploy/ydb-operator/crds/storagenodeset.yaml +++ b/deploy/ydb-operator/crds/storagenodeset.yaml @@ -4624,9 +4624,6 @@ spec: - type type: object type: array - observedStorageGeneration: - format: int64 - type: integer state: type: string required: diff --git a/e2e/tests/smoke_test.go b/e2e/tests/smoke_test.go index d647b6e3..21af8464 100644 --- a/e2e/tests/smoke_test.go +++ b/e2e/tests/smoke_test.go @@ -434,26 +434,6 @@ var _ = Describe("Operator smoke test", func() { } Expect(k8sClient.Update(ctx, &database)).Should(Succeed()) - By("check that ObservedDatabaseGeneration changed...") - Eventually(func(g Gomega) bool { - database := v1alpha1.Database{} - g.Expect(k8sClient.Get(ctx, types.NamespacedName{ - Name: databaseSample.Name, - Namespace: testobjects.YdbNamespace, - }, &database)).Should(Succeed()) - - databaseNodeSetList := v1alpha1.DatabaseNodeSetList{} - g.Expect(k8sClient.List(ctx, &databaseNodeSetList, - client.InNamespace(testobjects.YdbNamespace), - )).Should(Succeed()) - for _, databaseNodeSet := range databaseNodeSetList.Items { - if database.GetGeneration() != databaseNodeSet.Status.ObservedDatabaseGeneration { - return false - } - } - return true - }, Timeout, Interval).Should(BeTrue()) - By("expecting databaseNodeSet pods deletion...") Eventually(func(g Gomega) bool { database := v1alpha1.Database{} diff 
--git a/go.mod b/go.mod index 66173cf1..6bf8df2a 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.13 // indirect + github.com/jgautheron/goconst v1.7.0 // indirect github.com/jonboulle/clockwork v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index d51cc5f9..1b0fccd6 100644 --- a/go.sum +++ b/go.sum @@ -249,6 +249,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0= +github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index 09a0f63e..bf8f9f17 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -5,6 +5,9 @@ import "time" type ClusterState string const ( + StorageResourceKind = "Storage" + DatabaseResourceKind = "Database" + StoragePausedCondition = "StoragePaused" StorageInitializedCondition = "StorageReady" StorageNodeSetReadyCondition = "StorageNodeSetReady" diff --git a/internal/controllers/database/controller.go b/internal/controllers/database/controller.go index 
3b4e923e..26a8cb70 100644 --- a/internal/controllers/database/controller.go +++ b/internal/controllers/database/controller.go @@ -57,7 +57,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu err := r.Get(ctx, req.NamespacedName, database) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("database resources not found") + r.Log.Info("%s resources not found", DatabaseResourceKind) return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") @@ -104,7 +104,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Database" { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != DatabaseResourceKind { return nil } @@ -126,7 +126,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Database" { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != DatabaseResourceKind { return nil } diff --git a/internal/controllers/storage/controller.go b/internal/controllers/storage/controller.go index 1157741f..a7186372 100644 --- a/internal/controllers/storage/controller.go +++ b/internal/controllers/storage/controller.go @@ -62,7 +62,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu err := r.Get(ctx, req.NamespacedName, storage) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("storage resources not found") + r.Log.Info("%s resources not found", StorageResourceKind) return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") @@ -95,7 +95,7 @@ func ignoreDeletionPredicate() predicate.Predicate { func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { controller := ctrl.NewControllerManagedBy(mgr).For(&ydbv1alpha1.Storage{}) - 
r.Recorder = mgr.GetEventRecorderFor("Storage") + r.Recorder = mgr.GetEventRecorderFor(StorageResourceKind) if r.WithServiceMonitors { controller = controller. @@ -114,7 +114,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Storage" { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != StorageResourceKind { return nil } @@ -136,7 +136,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != "Storage" { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != StorageResourceKind { return nil } From c70ba20143219d81e4afc527990292fda131f5ff Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Thu, 8 Feb 2024 20:00:36 +0300 Subject: [PATCH 09/24] use only cluster field for remote --- api/v1alpha1/common_types.go | 8 ++---- deploy/ydb-operator/crds/database.yaml | 9 +++---- deploy/ydb-operator/crds/storage.yaml | 9 +++---- internal/labels/label.go | 6 ++--- internal/resources/database.go | 7 +---- internal/resources/remotedatabasenodeset.go | 30 +++------------------ internal/resources/remotestoragenodeset.go | 25 +---------------- internal/resources/storage.go | 7 +---- 8 files changed, 17 insertions(+), 84 deletions(-) diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 2b2f8b7a..ae05d917 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -36,11 +36,7 @@ type PodImage struct { } type RemoteSpec struct { - // (Optional) Remote cloud region to deploy into - // +optional - Region string `json:"region,omitempty"` - - // Remote cloud zone to deploy into + // Remote cluster to deploy NodeSet into // +required - Zone string `json:"zone"` + Cluster string `json:"cluster"` } diff --git 
a/deploy/ydb-operator/crds/database.yaml b/deploy/ydb-operator/crds/database.yaml index 1bedf321..f9205f1f 100644 --- a/deploy/ydb-operator/crds/database.yaml +++ b/deploy/ydb-operator/crds/database.yaml @@ -3176,14 +3176,11 @@ spec: description: (Optional) Object should be reference to remote object properties: - region: - description: (Optional) Remote cloud region to deploy into - type: string - zone: - description: Remote cloud zone to deploy into + cluster: + description: Remote cluster to deploy NodeSet into type: string required: - - zone + - cluster type: object resources: description: (Optional) Database storage and compute resources diff --git a/deploy/ydb-operator/crds/storage.yaml b/deploy/ydb-operator/crds/storage.yaml index cb9f9d30..adeaef95 100644 --- a/deploy/ydb-operator/crds/storage.yaml +++ b/deploy/ydb-operator/crds/storage.yaml @@ -3558,14 +3558,11 @@ spec: description: (Optional) Object should be reference to remote object properties: - region: - description: (Optional) Remote cloud region to deploy into - type: string - zone: - description: Remote cloud zone to deploy into + cluster: + description: Remote cluster to deploy NodeSet into type: string required: - - zone + - cluster type: object resources: description: '(Optional) Container resource limits. 
Any container diff --git a/internal/labels/label.go b/internal/labels/label.go index 46417afd..16f0d65b 100644 --- a/internal/labels/label.go +++ b/internal/labels/label.go @@ -16,10 +16,6 @@ const ( PartOfKey = "app.kubernetes.io/part-of" // ManagedByKey The tool being used to manage the operation of an application ManagedByKey = "app.kubernetes.io/managed-by" - // TopologyZoneKey A zone represents a logical failure domain - TopologyZoneKey = "topology.kubernetes.io/zone" - // TopologyRegionKey A region represents a larger domain, made up of one or more zones - TopologyRegionKey = "topology.kubernetes.io/region" // ServiceComponent The specialization of a Service resource ServiceComponent = "ydb.tech/service-for" @@ -27,6 +23,8 @@ const ( StorageNodeSetComponent = "ydb.tech/storage-nodeset" // DatabaseNodeSetComponent The specialization of a DatabaseNodeSet resource DatabaseNodeSetComponent = "ydb.tech/database-nodeset" + // RemoteClusterKey The specialization of a remote k8s cluster + RemoteClusterKey = "ydb.tech/remote-cluster" StorageComponent = "storage-node" DynamicComponent = "dynamic-node" diff --git a/internal/resources/database.go b/internal/resources/database.go index 4b640b91..1d5cd0ac 100644 --- a/internal/resources/database.go +++ b/internal/resources/database.go @@ -208,13 +208,8 @@ func (b *DatabaseBuilder) getNodeSetBuilders(databaseLabels labels.Labels) []Res databaseNodeSetSpec := b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) if nodeSetSpecInline.Remote != nil { - if nodeSetSpecInline.Remote.Region != "" { - nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, - }) - } nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + labels.RemoteClusterKey: nodeSetSpecInline.Remote.Cluster, }) nodeSetBuilders = append( nodeSetBuilders, diff --git a/internal/resources/remotedatabasenodeset.go 
b/internal/resources/remotedatabasenodeset.go index 8984879c..954cf02d 100644 --- a/internal/resources/remotedatabasenodeset.go +++ b/internal/resources/remotedatabasenodeset.go @@ -3,14 +3,10 @@ package resources import ( "errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck ) type RemoteDatabaseNodeSetBuilder struct { @@ -52,14 +48,15 @@ func (b *RemoteDatabaseNodeSetBuilder) Placeholder(cr client.Object) client.Obje } } -func (b *RemoteDatabaseNodeSetResource) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { +func (b *RemoteDatabaseNodeSetResource) GetResourceBuilders() []ResourceBuilder { var resourceBuilders []ResourceBuilder resourceBuilders = append(resourceBuilders, &DatabaseNodeSetBuilder{ Object: b, - Name: b.Name, - Labels: b.Labels, + Name: b.Name, + Labels: b.Labels, + DatabaseNodeSetSpec: b.Spec, }, ) @@ -71,22 +68,3 @@ func NewRemoteDatabaseNodeSet(remoteDatabaseNodeSet *api.RemoteDatabaseNodeSet) return RemoteDatabaseNodeSetResource{RemoteDatabaseNodeSet: crRemoteDatabaseNodeSet} } - -func (b *RemoteDatabaseNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl.Result, error) { - if b.Status.Conditions == nil { - b.Status.Conditions = []metav1.Condition{} - - if b.Spec.Pause { - meta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ - Type: DatabasePausedCondition, - Status: "False", - Reason: ReasonInProgress, - Message: "Transitioning RemoteDatabaseNodeSet to Paused state", - }) - - return Stop, ctrl.Result{RequeueAfter: StatusUpdateRequeueDelay}, nil - } - } - - return Continue, ctrl.Result{}, nil -} diff --git a/internal/resources/remotestoragenodeset.go b/internal/resources/remotestoragenodeset.go 
index a7a5fc99..60712727 100644 --- a/internal/resources/remotestoragenodeset.go +++ b/internal/resources/remotestoragenodeset.go @@ -3,14 +3,10 @@ package resources import ( "errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck ) type RemoteStorageNodeSetBuilder struct { @@ -52,7 +48,7 @@ func (b *RemoteStorageNodeSetBuilder) Placeholder(cr client.Object) client.Objec } } -func (b *RemoteStorageNodeSetResource) GetResourceBuilders(restConfig *rest.Config) []ResourceBuilder { +func (b *RemoteStorageNodeSetResource) GetResourceBuilders() []ResourceBuilder { var resourceBuilders []ResourceBuilder resourceBuilders = append(resourceBuilders, &StorageNodeSetBuilder{ @@ -71,22 +67,3 @@ func NewRemoteStorageNodeSet(remoteStorageNodeSet *api.RemoteStorageNodeSet) Rem return RemoteStorageNodeSetResource{RemoteStorageNodeSet: crRemoteStorageNodeSet} } - -func (b *RemoteStorageNodeSetResource) SetStatusOnFirstReconcile() (bool, ctrl.Result, error) { - if b.Status.Conditions == nil { - b.Status.Conditions = []metav1.Condition{} - - if b.Spec.Pause { - meta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ - Type: StoragePausedCondition, - Status: "False", - Reason: ReasonInProgress, - Message: "Transitioning RemoteStorageNodeSet to Paused state", - }) - - return Stop, ctrl.Result{RequeueAfter: StatusUpdateRequeueDelay}, nil - } - } - - return Continue, ctrl.Result{}, nil -} diff --git a/internal/resources/storage.go b/internal/resources/storage.go index 8442c2e2..54cd99a7 100644 --- a/internal/resources/storage.go +++ b/internal/resources/storage.go @@ -160,13 +160,8 @@ func (b *StorageClusterBuilder) getNodeSetBuilders(storageLabels 
labels.Labels) storageNodeSetSpec := b.recastStorageNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) if nodeSetSpecInline.Remote != nil { - if nodeSetSpecInline.Remote.Region != "" { - nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyRegionKey: nodeSetSpecInline.Remote.Region, - }) - } nodeSetLabels = nodeSetLabels.Merge(map[string]string{ - labels.TopologyZoneKey: nodeSetSpecInline.Remote.Zone, + labels.RemoteClusterKey: nodeSetSpecInline.Remote.Cluster, }) nodeSetBuilders = append( nodeSetBuilders, From 41f793820b9cad5ec7de24afd32c6fad9672d6f5 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Thu, 8 Feb 2024 20:00:56 +0300 Subject: [PATCH 10/24] impl remote controllers --- cmd/ydb-kubernetes-operator/main.go | 67 +++++ internal/controllers/constants/constants.go | 4 + .../remotedatabasenodeset/controller.go | 128 ++++++++++ .../controllers/remotedatabasenodeset/sync.go | 152 ++++++++++++ .../remotestoragenodeset/controller.go | 129 ++++++++++ .../remotestoragenodeset/controller_test.go | 233 ++++++++++++++++++ .../controllers/remotestoragenodeset/sync.go | 152 ++++++++++++ 7 files changed, 865 insertions(+) create mode 100644 internal/controllers/remotedatabasenodeset/controller.go create mode 100644 internal/controllers/remotedatabasenodeset/sync.go create mode 100644 internal/controllers/remotestoragenodeset/controller.go create mode 100644 internal/controllers/remotestoragenodeset/controller_test.go create mode 100644 internal/controllers/remotestoragenodeset/sync.go diff --git a/cmd/ydb-kubernetes-operator/main.go b/cmd/ydb-kubernetes-operator/main.go index 809f82ee..974b5735 100644 --- a/cmd/ydb-kubernetes-operator/main.go +++ b/cmd/ydb-kubernetes-operator/main.go @@ -9,7 +9,10 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/clientcmd" ctrl "sigs.k8s.io/controller-runtime" + 
"sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -17,6 +20,8 @@ import ( "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/databasenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/monitoring" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotedatabasenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotestoragenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" ) @@ -39,6 +44,8 @@ func main() { var disableWebhooks bool var enableServiceMonitors bool var probeAddr string + var remoteKubeconfig string + var remoteCluster string flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, @@ -46,6 +53,8 @@ func main() { "Enabling this will ensure there is only one active controller manager.") flag.BoolVar(&disableWebhooks, "disable-webhooks", false, "Disable webhooks registration on start.") flag.BoolVar(&enableServiceMonitors, "with-service-monitors", false, "Enables service monitoring") + flag.StringVar(&remoteKubeconfig, "remote-kubeconfig", "/remote-kubeconfig", "Path to kubeconfig for remote k8s cluster. Only required if using Remote objects") + flag.StringVar(&remoteCluster, "remote-cluster", "", "The name of remote cluster to sync k8s resources. 
Only required if using Remote objects") opts := zap.Options{ Development: true, } @@ -143,6 +152,64 @@ func main() { os.Exit(1) } } + + if remoteKubeconfig != "" && remoteCluster != "" { + remoteConfig, err := clientcmd.BuildConfigFromFlags("", remoteKubeconfig) + if err != nil { + setupLog.Error(err, "unable to read remote kubeconfig") + os.Exit(1) + } + + storageSelector, err := remotestoragenodeset.BuildRemoteSelector(remoteCluster) + if err != nil { + setupLog.Error(err, "unable to create label selector", "selector", "RemoteStorageNodeSet") + os.Exit(1) + } + + databaseSelector, err := remotedatabasenodeset.BuildRemoteSelector(remoteCluster) + if err != nil { + setupLog.Error(err, "unable to create label selector", "selector", "RemoteDatabaseNodeSet") + os.Exit(1) + } + + remoteCluster, err := cluster.New(remoteConfig, func(o *cluster.Options) { + o.Scheme = scheme + o.NewCache = cache.BuilderWithOptions(cache.Options{ + SelectorsByObject: cache.SelectorsByObject{ + &ydbv1alpha1.RemoteStorageNodeSet{}: {Label: storageSelector}, + &ydbv1alpha1.RemoteDatabaseNodeSet{}: {Label: databaseSelector}, + }, + }) + }) + if err != nil { + setupLog.Error(err, "unable to create remote client") + os.Exit(1) + } + + if err = mgr.Add(remoteCluster); err != nil { + setupLog.Error(err, "unable to add remote client to controller manager") + os.Exit(1) + } + + if err = (&remotestoragenodeset.Reconciler{ + Client: mgr.GetClient(), + RemoteClient: remoteCluster.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, &remoteCluster); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RemoteStorageNodeSet") + os.Exit(1) + } + + if err = (&remotedatabasenodeset.Reconciler{ + Client: mgr.GetClient(), + RemoteClient: remoteCluster.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr, &remoteCluster); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RemoteDatabaseNodeSet") + os.Exit(1) + } + } + 
//+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index bf8f9f17..1408f9d0 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -56,5 +56,9 @@ const ( StorageAwaitRequeueDelay = 30 * time.Second SharedDatabaseAwaitRequeueDelay = 30 * time.Second + PrimaryResourceNameAnnotation = "ydb.tech/primary-resource-name" + PrimaryResourceNamespaceAnnotation = "ydb.tech/primary-resource-namespace" + PrimaryResourceTypeAnnotation = "ydb.tech/primary-resource-type" + OwnerControllerKey = ".metadata.controller" ) diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go new file mode 100644 index 00000000..6c1a2372 --- /dev/null +++ b/internal/controllers/remotedatabasenodeset/controller.go @@ -0,0 +1,128 @@ +package remotedatabasenodeset + +import ( + "context" + + "github.com/go-logr/logr" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck + ydblabels "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" +) + +// Reconciler reconciles a RemoteDatabaseNodeSet object +type Reconciler struct { + Client client.Client + RemoteClient client.Client + RemoteRecorder record.EventRecorder + Log logr.Logger + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=ydb.tech,resources=remotedatabasenodesets,verbs=get;list;watch +//+kubebuilder:rbac:groups=ydb.tech,resources=remotedatabasenodesets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=ydb.tech,resources=remotedatabasenodesets/finalizers,verbs=update +//+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets/finalizers,verbs=update + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + remoteDatabaseNodeSet := &api.RemoteDatabaseNodeSet{} + + if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteDatabaseNodeSet); err != nil { + if apierrs.IsNotFound(err) { + logger.Info("RemotedatabaseNodeSet has been deleted") + return r.handleRemoteResourceDeleted(ctx, req) + } + logger.Error(err, "unable to get RemotedatabaseNodeSet") + return ctrl.Result{}, err + } + + result, err := r.Sync(ctx, remoteDatabaseNodeSet) + if err != nil { + r.Log.Error(err, "unexpected Sync error") + } + + return result, err +} + +func (r *Reconciler) handleRemoteResourceDeleted(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + databaseNodeSet := &api.DatabaseNodeSet{} + + if err := r.Client.Get(ctx, req.NamespacedName, databaseNodeSet); err != nil { + if apierrs.IsNotFound(err) { + logger.Info("databaseNodeSet has been 
deleted") + return ctrl.Result{Requeue: false}, nil + } + logger.Error(err, "unable to get databaseNodeSet") + return ctrl.Result{}, err + } + + if err := r.Client.Delete(ctx, databaseNodeSet); err != nil { + logger.Error(err, "unable to delete databaseNodeSet") + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: false}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { + cluster := *remoteCluster + + r.RemoteRecorder = cluster.GetEventRecorderFor("RemoteDatabaseNodeSet") + + annotationFilter := func(mapObj client.Object) []reconcile.Request { + requests := make([]reconcile.Request, 0) + + annotations := mapObj.GetAnnotations() + primaryResourceName := annotations[PrimaryResourceNameAnnotation] + primaryResourceNamespace := annotations[PrimaryResourceNamespaceAnnotation] + + if primaryResourceName != "" && primaryResourceNamespace != "" { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: primaryResourceNamespace, + Name: primaryResourceName, + }, + }) + } + return requests + } + + return ctrl.NewControllerManagedBy(mgr). + Watches(source.NewKindWithCache(&api.RemoteDatabaseNodeSet{}, cluster.GetCache()), &handler.EnqueueRequestForObject{}). + Watches(&source.Kind{Type: &api.DatabaseNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). 
+ Complete(r) +} + +func BuildRemoteSelector(remoteCluster string) (labels.Selector, error) { + labelRequirements := []labels.Requirement{} + remoteClusterRequirement, err := labels.NewRequirement( + ydblabels.RemoteClusterKey, + selection.Equals, + []string{remoteCluster}, + ) + if err != nil { + return nil, err + } + labelRequirements = append(labelRequirements, *remoteClusterRequirement) + return labels.NewSelector().Add(labelRequirements...), nil +} diff --git a/internal/controllers/remotedatabasenodeset/sync.go b/internal/controllers/remotedatabasenodeset/sync.go new file mode 100644 index 00000000..5babbb18 --- /dev/null +++ b/internal/controllers/remotedatabasenodeset/sync.go @@ -0,0 +1,152 @@ +package remotedatabasenodeset + +import ( + "context" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + ydbv1alpha1 "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck + "github.com/ydb-platform/ydb-kubernetes-operator/internal/resources" +) + +func (r *Reconciler) Sync(ctx context.Context, crRemoteDatabaseNodeSet *ydbv1alpha1.RemoteDatabaseNodeSet) (ctrl.Result, error) { + var stop bool + var result ctrl.Result + var err error + + remoteDatabaseNodeSet := resources.NewRemoteDatabaseNodeSet(crRemoteDatabaseNodeSet) + stop, result, err = r.handleResourcesSync(ctx, &remoteDatabaseNodeSet) + if stop { + return result, err + } + + stop, result, err = r.updateStatus(ctx, &remoteDatabaseNodeSet) + if stop { + return result, err + } + + return result, err +} + +func (r *Reconciler) handleResourcesSync( + ctx context.Context, + remoteDatabaseNodeSet *resources.RemoteDatabaseNodeSetResource, +) (bool, ctrl.Result, error) { + r.Log.Info("running step handleResourcesSync") + + for _, builder := range remoteDatabaseNodeSet.GetResourceBuilders() { + newResource := builder.Placeholder(remoteDatabaseNodeSet) + + result, err := resources.CreateOrUpdateOrMaybeIgnore(ctx, r.Client, newResource, func() error { + err := builder.Build(newResource) + if err != nil { + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("Failed building resources: %s", err), + ) + return err + } + + // Set primary resource annotation + newResource.SetAnnotations(map[string]string{ + PrimaryResourceNameAnnotation: remoteDatabaseNodeSet.GetName(), + PrimaryResourceNamespaceAnnotation: remoteDatabaseNodeSet.GetNamespace(), + PrimaryResourceTypeAnnotation: remoteDatabaseNodeSet.GetObjectKind().GroupVersionKind().Kind, + }) + return nil + }, func(oldObj, newObj runtime.Object) bool { + return false + }) + + eventMessage := fmt.Sprintf( + "Resource: %s, Namespace: %s, Name: %s", + reflect.TypeOf(newResource), + newResource.GetNamespace(), + newResource.GetName(), + ) + if err != nil { + 
r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + eventMessage+fmt.Sprintf(", failed to sync, error: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } else if result == controllerutil.OperationResultCreated || result == controllerutil.OperationResultUpdated { + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeNormal, + "Provisioning", + eventMessage+fmt.Sprintf(", changed, result: %s", result), + ) + } + } + r.Log.Info("resource sync complete") + return Continue, ctrl.Result{Requeue: false}, nil +} + +func (r *Reconciler) updateStatus( + ctx context.Context, + remoteDatabaseNodeSet *resources.RemoteDatabaseNodeSetResource, +) (bool, ctrl.Result, error) { + r.Log.Info("running step updateStatus") + + databaseNodeSet := ydbv1alpha1.DatabaseNodeSet{} + err := r.Client.Get(ctx, types.NamespacedName{ + Name: remoteDatabaseNodeSet.Name, + Namespace: remoteDatabaseNodeSet.Namespace, + }, &databaseNodeSet) + + if err != nil { + if errors.IsNotFound(err) { + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("databaseNodeSet with name %s was not found: %s", remoteDatabaseNodeSet.Name, err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil + } + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed to get databaseNodeSet: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + oldStatus := remoteDatabaseNodeSet.Status.State + remoteDatabaseNodeSet.Status.State = databaseNodeSet.Status.State + remoteDatabaseNodeSet.Status.Conditions = databaseNodeSet.Status.Conditions + + err = r.RemoteClient.Status().Update(ctx, remoteDatabaseNodeSet) + if err != nil { + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed setting status: 
%s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } else if oldStatus != databaseNodeSet.Status.State { + r.RemoteRecorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeNormal, + "StatusChanged", + fmt.Sprintf("databaseNodeSet moved from %s to %s", oldStatus, databaseNodeSet.Status.State), + ) + } + + return Continue, ctrl.Result{Requeue: false}, nil +} diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go new file mode 100644 index 00000000..48380953 --- /dev/null +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -0,0 +1,129 @@ +package remotestoragenodeset + +import ( + "context" + + "github.com/go-logr/logr" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck + ydblabels "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" +) + +// Reconciler reconciles a RemoteStorageNodeSet object +type Reconciler struct { + Client client.Client + RemoteClient client.Client + RemoteRecorder record.EventRecorder + Log logr.Logger + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=ydb.tech,resources=remotestoragenodesets,verbs=get;list;watch +//+kubebuilder:rbac:groups=ydb.tech,resources=remotestoragenodesets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=ydb.tech,resources=remotestoragenodesets/finalizers,verbs=update +//+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets/finalizers,verbs=update + +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + remoteStorageNodeSet := &api.RemoteStorageNodeSet{} + + if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteStorageNodeSet); err != nil { + if apierrs.IsNotFound(err) { + logger.Info("RemoteStorageNodeSet has been deleted") + return r.handleRemoteResourceDeleted(ctx, req) + } + logger.Error(err, "unable to get RemoteStorageNodeSet") + return ctrl.Result{}, err + } + + result, err := r.Sync(ctx, remoteStorageNodeSet) + if err != nil { + r.Log.Error(err, "unexpected Sync error") + } + + return result, err +} + +func (r *Reconciler) handleRemoteResourceDeleted(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + storageNodeSet := &api.StorageNodeSet{} + + if err := r.Client.Get(ctx, req.NamespacedName, storageNodeSet); err != nil { + if apierrs.IsNotFound(err) { + logger.Info("StorageNodeSet has been deleted") + return 
ctrl.Result{Requeue: false}, nil + } + logger.Error(err, "unable to get StorageNodeSet") + return ctrl.Result{}, err + } + + if err := r.Client.Delete(ctx, storageNodeSet); err != nil { + logger.Error(err, "unable to delete StorageNodeSet") + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: false}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { + cluster := *remoteCluster + + r.RemoteRecorder = cluster.GetEventRecorderFor("RemoteStorageNodeSet") + + annotationFilter := func(mapObj client.Object) []reconcile.Request { + requests := make([]reconcile.Request, 0) + + annotations := mapObj.GetAnnotations() + primaryResourceName := annotations[PrimaryResourceNameAnnotation] + primaryResourceNamespace := annotations[PrimaryResourceNamespaceAnnotation] + + if primaryResourceName != "" && primaryResourceNamespace != "" { + requests = append(requests, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: primaryResourceNamespace, + Name: primaryResourceName, + }, + }) + } + return requests + } + + return ctrl.NewControllerManagedBy(mgr). + Named("RemoteStorageNodeSet"). + Watches(source.NewKindWithCache(&api.RemoteStorageNodeSet{}, cluster.GetCache()), &handler.EnqueueRequestForObject{}). + Watches(&source.Kind{Type: &api.StorageNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). 
+ Complete(r) +} + +func BuildRemoteSelector(remoteCluster string) (labels.Selector, error) { + labelRequirements := []labels.Requirement{} + remoteClusterRequirement, err := labels.NewRequirement( + ydblabels.RemoteClusterKey, + selection.Equals, + []string{remoteCluster}, + ) + if err != nil { + return nil, err + } + labelRequirements = append(labelRequirements, *remoteClusterRequirement) + return labels.NewSelector().Add(labelRequirements...), nil +} diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go new file mode 100644 index 00000000..02e29eb1 --- /dev/null +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -0,0 +1,233 @@ +package remotestoragenodeset_test + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotestoragenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" +) + +const ( + testRemoteCluster = "remote-cluster" + testNodeSetName = "nodeset" +) + +var ( + localClient client.Client + remoteClient client.Client + localEnv *envtest.Environment + 
remoteEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestRemoteNodeSetApis(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "RemoteStorageNodeSet controller tests") +} + +var _ = BeforeSuite(func() { + By("bootstrapping test environment") + + ctx, cancel = context.WithCancel(context.TODO()) + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + localEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + } + remoteEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + } + + err := api.AddToScheme(scheme.Scheme) + Expect(err).ShouldNot(HaveOccurred()) + + localCfg, err := localEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(localCfg).ToNot(BeNil()) + + remoteCfg, err := remoteEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(remoteCfg).ToNot(BeNil()) + + // +kubebuilder:scaffold:scheme + + localManager, err := ctrl.NewManager(localCfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ShouldNot(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + remoteManager, err := ctrl.NewManager(remoteCfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ShouldNot(HaveOccurred()) + + storageSelector, err := remotestoragenodeset.BuildRemoteSelector(testRemoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + remoteCluster, err := cluster.New(localCfg, func(o *cluster.Options) { + o.Scheme = scheme.Scheme + o.NewCache = cache.BuilderWithOptions(cache.Options{ + SelectorsByObject: cache.SelectorsByObject{ + &api.RemoteStorageNodeSet{}: {Label: storageSelector}, + }, + }) + }) + Expect(err).ShouldNot(HaveOccurred()) + + err = remoteManager.Add(remoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&storage.Reconciler{ + Client: localManager.GetClient(), + Scheme: 
localManager.GetScheme(), + Config: localManager.GetConfig(), + Recorder: localManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(localManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&storagenodeset.Reconciler{ + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), + Recorder: localManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(localManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&storagenodeset.Reconciler{ + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), + Config: remoteManager.GetConfig(), + Recorder: remoteManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(remoteManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&remotestoragenodeset.Reconciler{ + Client: remoteManager.GetClient(), + RemoteClient: localManager.GetClient(), + Scheme: remoteManager.GetScheme(), + RemoteRecorder: remoteManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(remoteManager, &remoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = localManager.Start(ctx) + Expect(err).ShouldNot(HaveOccurred()) + }() + + go func() { + defer GinkgoRecover() + err = remoteManager.Start(ctx) + Expect(err).ShouldNot(HaveOccurred()) + }() + + localClient = localManager.GetClient() + Expect(localClient).ToNot(BeNil()) + remoteClient = remoteManager.GetClient() + Expect(remoteClient).ToNot(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := localEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + err = remoteEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +var _ = Describe("RemoteStorageNodeSet controller tests", func() { + var localNamespace corev1.Namespace + var remoteNamespace corev1.Namespace + var storageSample *api.Storage + + BeforeEach(func() { + localNamespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
testobjects.YdbNamespace, + }, + } + remoteNamespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testobjects.YdbNamespace, + }, + } + Expect(localClient.Create(ctx, &localNamespace)).Should(Succeed()) + Expect(remoteClient.Create(ctx, &remoteNamespace)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(localClient.Delete(ctx, &localNamespace)).Should(Succeed()) + Expect(remoteClient.Delete(ctx, &remoteNamespace)).Should(Succeed()) + }) + + When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { + It("Should create StorageNodeSet in k8s-data-cluster", func() { + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 4, + }, + }) + Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + Eventually(func() bool { + foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} + + Expect(localClient.List(ctx, &foundRemoteStorageNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundRemoteStorageNodeSet.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + Eventually(func() bool { + foundStorageNodeSetInRemote := api.StorageNodeSetList{} + + Expect(remoteClient.List(ctx, &foundStorageNodeSetInRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundStorageNodeSetInRemote.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + }) + }) +}) diff --git 
a/internal/controllers/remotestoragenodeset/sync.go b/internal/controllers/remotestoragenodeset/sync.go new file mode 100644 index 00000000..55544609 --- /dev/null +++ b/internal/controllers/remotestoragenodeset/sync.go @@ -0,0 +1,152 @@ +package remotestoragenodeset + +import ( + "context" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + ydbv1alpha1 "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck + "github.com/ydb-platform/ydb-kubernetes-operator/internal/resources" +) + +func (r *Reconciler) Sync(ctx context.Context, crRemoteStorageNodeSet *ydbv1alpha1.RemoteStorageNodeSet) (ctrl.Result, error) { + var stop bool + var result ctrl.Result + var err error + + RemoteStorageNodeSet := resources.NewRemoteStorageNodeSet(crRemoteStorageNodeSet) + stop, result, err = r.handleResourcesSync(ctx, &RemoteStorageNodeSet) + if stop { + return result, err + } + + stop, result, err = r.updateStatus(ctx, &RemoteStorageNodeSet) + if stop { + return result, err + } + + return result, err +} + +func (r *Reconciler) handleResourcesSync( + ctx context.Context, + remoteStorageNodeSet *resources.RemoteStorageNodeSetResource, +) (bool, ctrl.Result, error) { + r.Log.Info("running step handleResourcesSync") + + for _, builder := range remoteStorageNodeSet.GetResourceBuilders() { + newResource := builder.Placeholder(remoteStorageNodeSet) + + result, err := resources.CreateOrUpdateOrMaybeIgnore(ctx, r.Client, newResource, func() error { + err := builder.Build(newResource) + if err != nil { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("Failed building resources: %s", err), + ) + 
return err + } + + // Set primary resource annotation + newResource.SetAnnotations(map[string]string{ + PrimaryResourceNameAnnotation: remoteStorageNodeSet.GetName(), + PrimaryResourceNamespaceAnnotation: remoteStorageNodeSet.GetNamespace(), + PrimaryResourceTypeAnnotation: remoteStorageNodeSet.GetObjectKind().GroupVersionKind().Kind, + }) + return nil + }, func(oldObj, newObj runtime.Object) bool { + return false + }) + + eventMessage := fmt.Sprintf( + "Resource: %s, Namespace: %s, Name: %s", + reflect.TypeOf(newResource), + newResource.GetNamespace(), + newResource.GetName(), + ) + if err != nil { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + eventMessage+fmt.Sprintf(", failed to sync, error: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } else if result == controllerutil.OperationResultCreated || result == controllerutil.OperationResultUpdated { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeNormal, + "Provisioning", + eventMessage+fmt.Sprintf(", changed, result: %s", result), + ) + } + } + r.Log.Info("resource sync complete") + return Continue, ctrl.Result{Requeue: false}, nil +} + +func (r *Reconciler) updateStatus( + ctx context.Context, + remoteStorageNodeSet *resources.RemoteStorageNodeSetResource, +) (bool, ctrl.Result, error) { + r.Log.Info("running step updateStatus") + + storageNodeSet := ydbv1alpha1.StorageNodeSet{} + err := r.Client.Get(ctx, types.NamespacedName{ + Name: remoteStorageNodeSet.Name, + Namespace: remoteStorageNodeSet.Namespace, + }, &storageNodeSet) + + if err != nil { + if errors.IsNotFound(err) { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeWarning, + "ProvisioningFailed", + fmt.Sprintf("StorageNodeSet with name %s was not found on remote: %s", remoteStorageNodeSet.Name, err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil + } + r.RemoteRecorder.Event( + remoteStorageNodeSet, 
+ corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed to get StorageNodeSet on remote: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + oldStatus := remoteStorageNodeSet.Status.State + remoteStorageNodeSet.Status.State = storageNodeSet.Status.State + remoteStorageNodeSet.Status.Conditions = storageNodeSet.Status.Conditions + + err = r.RemoteClient.Status().Update(ctx, remoteStorageNodeSet) + if err != nil { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed setting status: %s", err), + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } else if oldStatus != storageNodeSet.Status.State { + r.RemoteRecorder.Event( + remoteStorageNodeSet, + corev1.EventTypeNormal, + "StatusChanged", + fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s", oldStatus, storageNodeSet.Status.State), + ) + } + + return Continue, ctrl.Result{Requeue: false}, nil +} From 97f2096c6b038015fd5a286029f7b5a47a2efd2b Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Fri, 9 Feb 2024 19:06:20 +0300 Subject: [PATCH 11/24] add remoteFinalizer to wait resource deletion --- cmd/ydb-kubernetes-operator/main.go | 1 + internal/controllers/constants/constants.go | 2 + .../controllers/remotedatabasenodeset/sync.go | 1 - .../remotestoragenodeset/controller.go | 69 ++++++++++++++----- .../controllers/remotestoragenodeset/sync.go | 7 +- .../controllers/storagenodeset/controller.go | 4 +- 6 files changed, 61 insertions(+), 23 deletions(-) diff --git a/cmd/ydb-kubernetes-operator/main.go b/cmd/ydb-kubernetes-operator/main.go index 974b5735..d85f720a 100644 --- a/cmd/ydb-kubernetes-operator/main.go +++ b/cmd/ydb-kubernetes-operator/main.go @@ -153,6 +153,7 @@ func main() { } } + //nolint:nestif if remoteKubeconfig != "" && remoteCluster != "" { remoteConfig, err := clientcmd.BuildConfigFromFlags("", remoteKubeconfig) if err != nil { diff --git 
a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index 1408f9d0..2271d36e 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -60,5 +60,7 @@ const ( PrimaryResourceNamespaceAnnotation = "ydb.tech/primary-resource-namespace" PrimaryResourceTypeAnnotation = "ydb.tech/primary-resource-type" + RemoteFinalizerKey = "ydb.tech/remote-finalizer" + OwnerControllerKey = ".metadata.controller" ) diff --git a/internal/controllers/remotedatabasenodeset/sync.go b/internal/controllers/remotedatabasenodeset/sync.go index 5babbb18..94695062 100644 --- a/internal/controllers/remotedatabasenodeset/sync.go +++ b/internal/controllers/remotedatabasenodeset/sync.go @@ -106,7 +106,6 @@ func (r *Reconciler) updateStatus( Name: remoteDatabaseNodeSet.Name, Namespace: remoteDatabaseNodeSet.Namespace, }, &databaseNodeSet) - if err != nil { if errors.IsNotFound(err) { r.RemoteRecorder.Event( diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go index 48380953..1b44e850 100644 --- a/internal/controllers/remotestoragenodeset/controller.go +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -4,7 +4,7 @@ import ( "context" "github.com/go-logr/logr" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" @@ -13,6 +13,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -43,14 +44,48 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu logger := 
log.FromContext(ctx) remoteStorageNodeSet := &api.RemoteStorageNodeSet{} - + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteStorageNodeSet); err != nil { - if apierrs.IsNotFound(err) { - logger.Info("RemoteStorageNodeSet has been deleted") - return r.handleRemoteResourceDeleted(ctx, req) + if apierrors.IsNotFound(err) { + logger.Info("StorageNodeSet has been deleted") + return ctrl.Result{Requeue: false}, nil } logger.Error(err, "unable to get RemoteStorageNodeSet") - return ctrl.Result{}, err + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil + } + + // examine DeletionTimestamp to determine if object is under deletion + if remoteStorageNodeSet.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // to registering our finalizer. + if !controllerutil.ContainsFinalizer(remoteStorageNodeSet, RemoteFinalizerKey) { + controllerutil.AddFinalizer(remoteStorageNodeSet, RemoteFinalizerKey) + if err := r.RemoteClient.Update(ctx, remoteStorageNodeSet); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + } + } else { + // The object is being deleted + if controllerutil.ContainsFinalizer(remoteStorageNodeSet, RemoteFinalizerKey) { + // our finalizer is present, so lets handle any external dependency + if err := r.deleteExternalResources(ctx, remoteStorageNodeSet); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried. + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + // remove our finalizer from the list and update it. 
+ controllerutil.RemoveFinalizer(remoteStorageNodeSet, RemoteFinalizerKey) + if err := r.RemoteClient.Update(ctx, remoteStorageNodeSet); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + } + + // Stop reconciliation as the item is being deleted + return ctrl.Result{Requeue: false}, nil } result, err := r.Sync(ctx, remoteStorageNodeSet) @@ -61,26 +96,28 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return result, err } -func (r *Reconciler) handleRemoteResourceDeleted(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteStorageNodeSet *api.RemoteStorageNodeSet) error { logger := log.FromContext(ctx) storageNodeSet := &api.StorageNodeSet{} - - if err := r.Client.Get(ctx, req.NamespacedName, storageNodeSet); err != nil { - if apierrs.IsNotFound(err) { - logger.Info("StorageNodeSet has been deleted") - return ctrl.Result{Requeue: false}, nil + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: remoteStorageNodeSet.Name, + Namespace: remoteStorageNodeSet.Namespace, + }, storageNodeSet); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("StorageNodeSet not found") + return nil } logger.Error(err, "unable to get StorageNodeSet") - return ctrl.Result{}, err + return err } if err := r.Client.Delete(ctx, storageNodeSet); err != nil { logger.Error(err, "unable to delete StorageNodeSet") - return ctrl.Result{}, err + return err } - return ctrl.Result{Requeue: false}, nil + return nil } // SetupWithManager sets up the controller with the Manager. @@ -108,7 +145,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C } return ctrl.NewControllerManagedBy(mgr). - Named("RemoteStorageNodeSet"). + Named("remotestoragenodeset"). Watches(source.NewKindWithCache(&api.RemoteStorageNodeSet{}, cluster.GetCache()), &handler.EnqueueRequestForObject{}). 
Watches(&source.Kind{Type: &api.StorageNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). Complete(r) diff --git a/internal/controllers/remotestoragenodeset/sync.go b/internal/controllers/remotestoragenodeset/sync.go index 55544609..04bcb312 100644 --- a/internal/controllers/remotestoragenodeset/sync.go +++ b/internal/controllers/remotestoragenodeset/sync.go @@ -22,13 +22,13 @@ func (r *Reconciler) Sync(ctx context.Context, crRemoteStorageNodeSet *ydbv1alph var result ctrl.Result var err error - RemoteStorageNodeSet := resources.NewRemoteStorageNodeSet(crRemoteStorageNodeSet) - stop, result, err = r.handleResourcesSync(ctx, &RemoteStorageNodeSet) + remoteStorageNodeSet := resources.NewRemoteStorageNodeSet(crRemoteStorageNodeSet) + stop, result, err = r.handleResourcesSync(ctx, &remoteStorageNodeSet) if stop { return result, err } - stop, result, err = r.updateStatus(ctx, &RemoteStorageNodeSet) + stop, result, err = r.updateStatus(ctx, &remoteStorageNodeSet) if stop { return result, err } @@ -106,7 +106,6 @@ func (r *Reconciler) updateStatus( Name: remoteStorageNodeSet.Name, Namespace: remoteStorageNodeSet.Namespace, }, &storageNodeSet) - if err != nil { if errors.IsNotFound(err) { r.RemoteRecorder.Event( diff --git a/internal/controllers/storagenodeset/controller.go b/internal/controllers/storagenodeset/controller.go index 6046cbcd..45330eef 100644 --- a/internal/controllers/storagenodeset/controller.go +++ b/internal/controllers/storagenodeset/controller.go @@ -6,7 +6,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -47,7 +47,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu crStorageNodeSet := &api.StorageNodeSet{} err := r.Get(ctx, req.NamespacedName, crStorageNodeSet) if err != nil { - 
if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { logger.Info("StorageNodeSet has been deleted") return ctrl.Result{Requeue: false}, nil } From 2f49d90424ba44b1b8b6d5b7ecf9508e444a8f72 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 6 Feb 2024 19:04:46 +0300 Subject: [PATCH 12/24] fixes tests --- e2e/tests/smoke_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/e2e/tests/smoke_test.go b/e2e/tests/smoke_test.go index 8684c71b..7b69dd19 100644 --- a/e2e/tests/smoke_test.go +++ b/e2e/tests/smoke_test.go @@ -431,7 +431,6 @@ var _ = Describe("Operator smoke test", func() { checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) database := v1alpha1.Database{} - databaseNodeSetList := v1alpha1.DatabaseNodeSetList{} databasePods := corev1.PodList{} By("delete nodeSetSpec inline to check inheritance...") Eventually(func(g Gomega) error { From 98b294f2d5cb89c16880741be3468afc6ee14e00 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Thu, 8 Feb 2024 20:05:13 +0300 Subject: [PATCH 13/24] fix go.mod --- go.mod | 1 - go.sum | 2 -- 2 files changed, 3 deletions(-) diff --git a/go.mod b/go.mod index 6bf8df2a..66173cf1 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,6 @@ require ( github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.13 // indirect - github.com/jgautheron/goconst v1.7.0 // indirect github.com/jonboulle/clockwork v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index 1b0fccd6..d51cc5f9 100644 --- a/go.sum +++ b/go.sum @@ -249,8 +249,6 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0= -github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= From 1d858760f9f59b364146998ffa844e8dd7487d2d Mon Sep 17 00:00:00 2001 From: kobzonega <122476665+kobzonega@users.noreply.github.com> Date: Tue, 6 Feb 2024 12:20:00 +0300 Subject: [PATCH 14/24] fix e2e tests check running and ready pods (#177) From 0a82ea24fc3c05379173add93d5816577886274e Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Fri, 9 Feb 2024 19:29:14 +0300 Subject: [PATCH 15/24] remove useless constants --- internal/controllers/constants/constants.go | 5 ----- internal/controllers/database/controller.go | 9 +++++---- internal/controllers/database/sync.go | 8 -------- internal/controllers/storage/controller.go | 12 ++++++------ internal/controllers/storage/sync.go | 8 -------- 5 files changed, 11 insertions(+), 31 deletions(-) diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index 2271d36e..8588d241 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -5,17 +5,12 @@ import "time" type ClusterState string const ( - StorageResourceKind = "Storage" - DatabaseResourceKind = "Database" - StoragePausedCondition = "StoragePaused" StorageInitializedCondition = "StorageReady" StorageNodeSetReadyCondition = "StorageNodeSetReady" - StorageNodeSetsSyncedCondition = "StorageNodeSetsSynced" DatabasePausedCondition = "DatabasePaused" DatabaseTenantInitializedCondition = "TenantInitialized" DatabaseNodeSetReadyCondition = 
"DatabaseNodeSetReady" - DatabaseNodeSetsSyncedCondition = "DatabaseNodeSetsSynced" Stop = true Continue = false diff --git a/internal/controllers/database/controller.go b/internal/controllers/database/controller.go index 26a8cb70..e39d90d7 100644 --- a/internal/controllers/database/controller.go +++ b/internal/controllers/database/controller.go @@ -57,7 +57,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu err := r.Get(ctx, req.NamespacedName, database) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("%s resources not found", DatabaseResourceKind) + r.Log.Info("%s resources not found", database.Kind) return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") @@ -88,7 +88,8 @@ func ignoreDeletionPredicate() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { - controller := ctrl.NewControllerManagedBy(mgr).For(&ydbv1alpha1.Database{}) + resource := &ydbv1alpha1.Database{} + controller := ctrl.NewControllerManagedBy(mgr).For(resource) r.Recorder = mgr.GetEventRecorderFor("Database") @@ -104,7 +105,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != DatabaseResourceKind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { return nil } @@ -126,7 +127,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... 
- if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != DatabaseResourceKind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { return nil } diff --git a/internal/controllers/database/sync.go b/internal/controllers/database/sync.go index 7a213ed3..65c2fbbe 100644 --- a/internal/controllers/database/sync.go +++ b/internal/controllers/database/sync.go @@ -562,14 +562,6 @@ func (r *Reconciler) syncNodeSetSpecInline( } } - meta.SetStatusCondition(&database.Status.Conditions, metav1.Condition{ - Type: DatabaseNodeSetsSyncedCondition, - Status: "True", - ObservedGeneration: database.Generation, - Reason: ReasonCompleted, - Message: "Synced (Remote)DatabaseNodeSets with Database spec", - }) - r.Log.Info("syncNodeSetSpecInline complete") return Continue, ctrl.Result{Requeue: false}, nil } diff --git a/internal/controllers/storage/controller.go b/internal/controllers/storage/controller.go index a7186372..fb23c9f7 100644 --- a/internal/controllers/storage/controller.go +++ b/internal/controllers/storage/controller.go @@ -62,7 +62,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu err := r.Get(ctx, req.NamespacedName, storage) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("%s resources not found", StorageResourceKind) + r.Log.Info("%s resources not found", storage.Kind) return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") @@ -93,9 +93,9 @@ func ignoreDeletionPredicate() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. 
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { - controller := ctrl.NewControllerManagedBy(mgr).For(&ydbv1alpha1.Storage{}) - - r.Recorder = mgr.GetEventRecorderFor(StorageResourceKind) + resource := &ydbv1alpha1.Storage{} + controller := ctrl.NewControllerManagedBy(mgr).For(resource) + r.Recorder = mgr.GetEventRecorderFor("Storage") if r.WithServiceMonitors { controller = controller. @@ -114,7 +114,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != StorageResourceKind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { return nil } @@ -136,7 +136,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != StorageResourceKind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { return nil } diff --git a/internal/controllers/storage/sync.go b/internal/controllers/storage/sync.go index 64f49166..57c185d2 100644 --- a/internal/controllers/storage/sync.go +++ b/internal/controllers/storage/sync.go @@ -396,14 +396,6 @@ func (r *Reconciler) syncNodeSetSpecInline( } } - meta.SetStatusCondition(&storage.Status.Conditions, metav1.Condition{ - Type: StorageNodeSetsSyncedCondition, - Status: "True", - ObservedGeneration: storage.Generation, - Reason: ReasonCompleted, - Message: "Synced (Remote)StorageNodeSets with Storage spec", - }) - r.Log.Info("syncNodeSetSpecInline complete") return Continue, ctrl.Result{Requeue: false}, nil } From d4fd3f2cbe8e49bf7f184a467964a495df8c4219 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 12 Feb 2024 14:39:16 +0300 Subject: [PATCH 16/24] use apiutil to recognize resource Kind --- internal/controllers/database/controller.go | 25 +++++++++------- 
internal/controllers/storage/controller.go | 32 ++++++++++++--------- 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/internal/controllers/database/controller.go b/internal/controllers/database/controller.go index e39d90d7..1b33ed0a 100644 --- a/internal/controllers/database/controller.go +++ b/internal/controllers/database/controller.go @@ -13,6 +13,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -53,20 +54,21 @@ type Reconciler struct { func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log = log.FromContext(ctx) - database := &ydbv1alpha1.Database{} - err := r.Get(ctx, req.NamespacedName, database) + resource := &ydbv1alpha1.Database{} + err := r.Get(ctx, req.NamespacedName, resource) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("%s resources not found", database.Kind) + r.Log.Info("Database resource not found") return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } - result, err := r.Sync(ctx, database) + result, err := r.Sync(ctx, resource) if err != nil { r.Log.Error(err, "unexpected Sync error") } + return result, err } @@ -89,10 +91,14 @@ func ignoreDeletionPredicate() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. 
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { resource := &ydbv1alpha1.Database{} - controller := ctrl.NewControllerManagedBy(mgr).For(resource) - - r.Recorder = mgr.GetEventRecorderFor("Database") + resourceGVK, err := apiutil.GVKForObject(resource, r.Scheme) + if err != nil { + r.Log.Error(err, "does not recognize GVK for resource") + return err + } + r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) + controller := ctrl.NewControllerManagedBy(mgr).For(resource) if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.RemoteDatabaseNodeSet{}, @@ -105,7 +111,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resourceGVK.Kind { return nil } @@ -114,7 +120,6 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { }); err != nil { return err } - if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.DatabaseNodeSet{}, @@ -127,7 +132,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Database... 
- if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resourceGVK.Kind { return nil } diff --git a/internal/controllers/storage/controller.go b/internal/controllers/storage/controller.go index fb23c9f7..a8d804ba 100644 --- a/internal/controllers/storage/controller.go +++ b/internal/controllers/storage/controller.go @@ -14,6 +14,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -58,20 +59,21 @@ type Reconciler struct { func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.Log = log.FromContext(ctx) - storage := &ydbv1alpha1.Storage{} - err := r.Get(ctx, req.NamespacedName, storage) + resource := &ydbv1alpha1.Storage{} + err := r.Get(ctx, req.NamespacedName, resource) if err != nil { if errors.IsNotFound(err) { - r.Log.Info("%s resources not found", storage.Kind) + r.Log.Info("Storage resource not found") return ctrl.Result{Requeue: false}, nil } r.Log.Error(err, "unexpected Get error") return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } - result, err := r.Sync(ctx, storage) + result, err := r.Sync(ctx, resource) if err != nil { r.Log.Error(err, "unexpected Sync error") } + return result, err } @@ -94,14 +96,14 @@ func ignoreDeletionPredicate() predicate.Predicate { // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { resource := &ydbv1alpha1.Storage{} - controller := ctrl.NewControllerManagedBy(mgr).For(resource) - r.Recorder = mgr.GetEventRecorderFor("Storage") - - if r.WithServiceMonitors { - controller = controller. 
- Owns(&monitoringv1.ServiceMonitor{}) + resourceGVK, err := apiutil.GVKForObject(resource, r.Scheme) + if err != nil { + r.Log.Error(err, "does not recognize GVK for resource") + return err } + r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) + controller := ctrl.NewControllerManagedBy(mgr).For(resource) if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.RemoteStorageNodeSet{}, @@ -114,7 +116,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resourceGVK.Kind { return nil } @@ -123,7 +125,6 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { }); err != nil { return err } - if err := mgr.GetFieldIndexer().IndexField( context.Background(), &ydbv1alpha1.StorageNodeSet{}, @@ -136,7 +137,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return nil } // ...make sure it's a Storage... - if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resource.Kind { + if owner.APIVersion != ydbv1alpha1.GroupVersion.String() || owner.Kind != resourceGVK.Kind { return nil } @@ -146,6 +147,11 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return err } + if r.WithServiceMonitors { + controller = controller. + Owns(&monitoringv1.ServiceMonitor{}) + } + return controller. Owns(&ydbv1alpha1.RemoteStorageNodeSet{}). Owns(&ydbv1alpha1.StorageNodeSet{}). 
From ed19e24644b77357dac0677391cf12403abd52bd Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 12 Feb 2024 14:40:54 +0300 Subject: [PATCH 17/24] update crds and generated deepcopy --- api/v1alpha1/zz_generated.deepcopy.go | 20 +++++++++---------- deploy/ydb-operator/crds/database.yaml | 14 ++++++++----- deploy/ydb-operator/crds/databasenodeset.yaml | 4 ++++ .../crds/remotedatabasenodeset.yaml | 4 ++++ .../crds/remotestoragenodeset.yaml | 4 ++++ deploy/ydb-operator/crds/storage.yaml | 4 ++++ deploy/ydb-operator/crds/storagenodeset.yaml | 4 ++++ 7 files changed, 39 insertions(+), 15 deletions(-) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0b11cd2c..6856180f 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -474,6 +474,11 @@ func (in *DatabaseNodeSpec) DeepCopyInto(out *DatabaseNodeSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } if in.AdditionalLabels != nil { in, out := &in.AdditionalLabels, &out.AdditionalLabels *out = make(map[string]string, len(*in)) @@ -488,11 +493,6 @@ func (in *DatabaseNodeSpec) DeepCopyInto(out *DatabaseNodeSpec) { (*out)[key] = val } } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseNodeSpec. 
@@ -1377,6 +1377,11 @@ func (in *StorageNodeSpec) DeepCopyInto(out *StorageNodeSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } if in.AdditionalLabels != nil { in, out := &in.AdditionalLabels, &out.AdditionalLabels *out = make(map[string]string, len(*in)) @@ -1391,11 +1396,6 @@ func (in *StorageNodeSpec) DeepCopyInto(out *StorageNodeSpec) { (*out)[key] = val } } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageNodeSpec. diff --git a/deploy/ydb-operator/crds/database.yaml b/deploy/ydb-operator/crds/database.yaml index 8f3b6a54..992ccbbd 100644 --- a/deploy/ydb-operator/crds/database.yaml +++ b/deploy/ydb-operator/crds/database.yaml @@ -2266,8 +2266,8 @@ spec: labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object nodeSets: - description: (Optional) NodeSet inline configuration to split into - multiple StatefulSets + description: '(Optional) NodeSet inline configuration to split into + multiple StatefulSets Default: (not specified)' items: description: DatabaseNodeSetSpecInline describes an group nodes object inside parent object @@ -3333,6 +3333,10 @@ spec: type: object type: array type: object + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. 
items: @@ -4069,13 +4073,13 @@ spec: required: - name type: object + storageEndpoint: + description: YDB Storage Node broker address + type: string terminationGracePeriodSeconds: description: (Optional) If specified, the pod's terminationGracePeriodSeconds. format: int64 type: integer - storageEndpoint: - description: YDB Storage Node broker address - type: string tolerations: description: (Optional) If specified, the pod's tolerations. items: diff --git a/deploy/ydb-operator/crds/databasenodeset.yaml b/deploy/ydb-operator/crds/databasenodeset.yaml index f2bcc4e2..2fd02003 100644 --- a/deploy/ydb-operator/crds/databasenodeset.yaml +++ b/deploy/ydb-operator/crds/databasenodeset.yaml @@ -2791,6 +2791,10 @@ spec: storageEndpoint: description: YDB Storage Node broker address type: string + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. items: diff --git a/deploy/ydb-operator/crds/remotedatabasenodeset.yaml b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml index 8acaaed4..cdc58359 100644 --- a/deploy/ydb-operator/crds/remotedatabasenodeset.yaml +++ b/deploy/ydb-operator/crds/remotedatabasenodeset.yaml @@ -2792,6 +2792,10 @@ spec: storageEndpoint: description: YDB Storage Node broker address type: string + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. 
items: diff --git a/deploy/ydb-operator/crds/remotestoragenodeset.yaml b/deploy/ydb-operator/crds/remotestoragenodeset.yaml index d3785dea..cc7a4efd 100644 --- a/deploy/ydb-operator/crds/remotestoragenodeset.yaml +++ b/deploy/ydb-operator/crds/remotestoragenodeset.yaml @@ -2785,6 +2785,10 @@ spec: required: - name type: object + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. items: diff --git a/deploy/ydb-operator/crds/storage.yaml b/deploy/ydb-operator/crds/storage.yaml index fc7841fe..b94c77b4 100644 --- a/deploy/ydb-operator/crds/storage.yaml +++ b/deploy/ydb-operator/crds/storage.yaml @@ -3613,6 +3613,10 @@ spec: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. items: diff --git a/deploy/ydb-operator/crds/storagenodeset.yaml b/deploy/ydb-operator/crds/storagenodeset.yaml index dc30ed0e..26c638c9 100644 --- a/deploy/ydb-operator/crds/storagenodeset.yaml +++ b/deploy/ydb-operator/crds/storagenodeset.yaml @@ -2784,6 +2784,10 @@ spec: required: - name type: object + terminationGracePeriodSeconds: + description: (Optional) If specified, the pod's terminationGracePeriodSeconds. + format: int64 + type: integer tolerations: description: (Optional) If specified, the pod's tolerations. 
items: From b77006611a865402efc6ed55c7ef18a117dd0c35 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 6 Feb 2024 19:04:46 +0300 Subject: [PATCH 18/24] fixes tests --- go.mod | 1 + go.sum | 2 ++ internal/controllers/constants/constants.go | 3 +++ 3 files changed, 6 insertions(+) diff --git a/go.mod b/go.mod index 66173cf1..6bf8df2a 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.13 // indirect + github.com/jgautheron/goconst v1.7.0 // indirect github.com/jonboulle/clockwork v0.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index d51cc5f9..1b0fccd6 100644 --- a/go.sum +++ b/go.sum @@ -249,6 +249,8 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0= +github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go index 8588d241..47c52e85 100644 --- a/internal/controllers/constants/constants.go +++ b/internal/controllers/constants/constants.go @@ -5,6 +5,9 @@ import "time" type ClusterState string const ( + StorageResourceKind = 
"Storage" + DatabaseResourceKind = "Database" + StoragePausedCondition = "StoragePaused" StorageInitializedCondition = "StorageReady" StorageNodeSetReadyCondition = "StorageNodeSetReady" From fcfe3af3d8a337976a6cf270bf0a5d9405027e58 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 12 Feb 2024 15:28:03 +0300 Subject: [PATCH 19/24] get resource kind by gvk apiutil use remoteRecorder only for status update --- .../remotedatabasenodeset/controller.go | 14 ++++- .../controllers/remotedatabasenodeset/sync.go | 28 ++++++--- .../remotestoragenodeset/controller.go | 15 ++++- .../remotestoragenodeset/controller_test.go | 60 +++++++++++++++++++ .../controllers/remotestoragenodeset/sync.go | 28 ++++++--- 5 files changed, 124 insertions(+), 21 deletions(-) diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go index 6c1a2372..babadef9 100644 --- a/internal/controllers/remotedatabasenodeset/controller.go +++ b/internal/controllers/remotedatabasenodeset/controller.go @@ -12,6 +12,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" @@ -27,6 +28,7 @@ import ( type Reconciler struct { Client client.Client RemoteClient client.Client + Recorder record.EventRecorder RemoteRecorder record.EventRecorder Log logr.Logger Scheme *runtime.Scheme @@ -86,8 +88,15 @@ func (r *Reconciler) handleRemoteResourceDeleted(ctx context.Context, req ctrl.R // SetupWithManager sets up the controller with the Manager. 
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { cluster := *remoteCluster + resource := &api.RemoteDatabaseNodeSet{} + resourceGVK, err := apiutil.GVKForObject(resource, r.Scheme) + if err != nil { + r.Log.Error(err, "does not recognize GVK for resource") + return err + } - r.RemoteRecorder = cluster.GetEventRecorderFor("RemoteDatabaseNodeSet") + r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) + r.RemoteRecorder = cluster.GetEventRecorderFor(resourceGVK.Kind) annotationFilter := func(mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) @@ -108,7 +117,8 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C } return ctrl.NewControllerManagedBy(mgr). - Watches(source.NewKindWithCache(&api.RemoteDatabaseNodeSet{}, cluster.GetCache()), &handler.EnqueueRequestForObject{}). + Named(resourceGVK.Kind). + Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}). Watches(&source.Kind{Type: &api.DatabaseNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). 
Complete(r) } diff --git a/internal/controllers/remotedatabasenodeset/sync.go b/internal/controllers/remotedatabasenodeset/sync.go index 94695062..31ffa5cb 100644 --- a/internal/controllers/remotedatabasenodeset/sync.go +++ b/internal/controllers/remotedatabasenodeset/sync.go @@ -48,7 +48,7 @@ func (r *Reconciler) handleResourcesSync( result, err := resources.CreateOrUpdateOrMaybeIgnore(ctx, r.Client, newResource, func() error { err := builder.Build(newResource) if err != nil { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteDatabaseNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", @@ -75,7 +75,7 @@ func (r *Reconciler) handleResourcesSync( newResource.GetName(), ) if err != nil { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteDatabaseNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", @@ -83,7 +83,7 @@ func (r *Reconciler) handleResourcesSync( ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } else if result == controllerutil.OperationResultCreated || result == controllerutil.OperationResultUpdated { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteDatabaseNodeSet, corev1.EventTypeNormal, "Provisioning", @@ -108,19 +108,19 @@ func (r *Reconciler) updateStatus( }, &databaseNodeSet) if err != nil { if errors.IsNotFound(err) { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteDatabaseNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", - fmt.Sprintf("databaseNodeSet with name %s was not found: %s", remoteDatabaseNodeSet.Name, err), + fmt.Sprintf("DatabaseNodeSet with name %s was not found: %s", remoteDatabaseNodeSet.Name, err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil } - r.RemoteRecorder.Event( + r.Recorder.Event( remoteDatabaseNodeSet, corev1.EventTypeWarning, "ControllerError", - fmt.Sprintf("Failed to get databaseNodeSet: %s", err), + fmt.Sprintf("Failed to get DatabaseNodeSet: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } @@ -131,6 +131,12 @@ func (r 
*Reconciler) updateStatus( err = r.RemoteClient.Status().Update(ctx, remoteDatabaseNodeSet) if err != nil { + r.Recorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed setting status on remote cluster: %s", err), + ) r.RemoteRecorder.Event( remoteDatabaseNodeSet, corev1.EventTypeWarning, @@ -139,11 +145,17 @@ func (r *Reconciler) updateStatus( ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } else if oldStatus != databaseNodeSet.Status.State { + r.Recorder.Event( + remoteDatabaseNodeSet, + corev1.EventTypeNormal, + "StatusChanged", + fmt.Sprintf("RemoteDatabaseNodeSet moved from %s to %s on remote cluster", oldStatus, remoteDatabaseNodeSet.Status.State), + ) r.RemoteRecorder.Event( remoteDatabaseNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("databaseNodeSet moved from %s to %s", oldStatus, databaseNodeSet.Status.State), + fmt.Sprintf("RemoteDatabaseNodeSet moved from %s to %s", oldStatus, remoteDatabaseNodeSet.Status.State), ) } diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go index 1b44e850..e7e72c72 100644 --- a/internal/controllers/remotestoragenodeset/controller.go +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -12,6 +12,7 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -28,6 +29,7 @@ import ( type Reconciler struct { Client client.Client RemoteClient client.Client + Recorder record.EventRecorder RemoteRecorder record.EventRecorder Log logr.Logger Scheme *runtime.Scheme @@ -123,8 +125,15 @@ func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteStorageN // SetupWithManager sets 
up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { cluster := *remoteCluster + resource := &api.RemoteStorageNodeSet{} + resourceGVK, err := apiutil.GVKForObject(resource, r.Scheme) + if err != nil { + r.Log.Error(err, "does not recognize GVK for resource") + return err + } - r.RemoteRecorder = cluster.GetEventRecorderFor("RemoteStorageNodeSet") + r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) + r.RemoteRecorder = cluster.GetEventRecorderFor(resourceGVK.Kind) annotationFilter := func(mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) @@ -145,8 +154,8 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C } return ctrl.NewControllerManagedBy(mgr). - Named("remotestoragenodeset"). - Watches(source.NewKindWithCache(&api.RemoteStorageNodeSet{}, cluster.GetCache()), &handler.EnqueueRequestForObject{}). + Named(resourceGVK.Kind). + Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}). Watches(&source.Kind{Type: &api.StorageNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). Complete(r) } diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index 02e29eb1..1e996c7a 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -8,7 +8,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/kubectl/pkg/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -20,6 +22,7 @@ import ( api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" + . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotestoragenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" @@ -189,7 +192,14 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { It("Should create StorageNodeSet in k8s-data-cluster", func() { + By("issuing create commands...") storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ + Name: testNodeSetName + "-local", + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 4, + }, + }) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ Name: testNodeSetName + "-remote", Remote: &api.RemoteSpec{ @@ -200,6 +210,24 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { }, }) Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + + By("checking that StorageNodeSet created on local cluster...") + Eventually(func() bool { + foundStorageNodeSet := api.StorageNodeSetList{} + + Expect(localClient.List(ctx, &foundStorageNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset 
:= range foundStorageNodeSet.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-local" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that RemoteStorageNodeSet created on local cluster...") Eventually(func() bool { foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} @@ -214,6 +242,8 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { } return false }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that StorageNodeSet created on remote cluster...") Eventually(func() bool { foundStorageNodeSetInRemote := api.StorageNodeSetList{} @@ -228,6 +258,36 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { } return false }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that StorageNodeSet status is Ready on remote cluster...") + Eventually(func() bool { + foundStorageNodeSetOnRemote := api.StorageNodeSet{} + Expect(remoteClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundStorageNodeSetOnRemote)).Should(Succeed()) + + return meta.IsStatusConditionPresentAndEqual( + foundStorageNodeSetOnRemote.Status.Conditions, + StorageNodeSetReadyCondition, + metav1.ConditionTrue, + ) && foundStorageNodeSetOnRemote.Status.State == StorageNodeSetReady + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that RemoteStorageNodeSet status updated on local cluster...") + Eventually(func() bool { + foundRemoteStorageNodeSetOnRemote := api.RemoteStorageNodeSet{} + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundRemoteStorageNodeSetOnRemote)).Should(Succeed()) + + return meta.IsStatusConditionPresentAndEqual( + foundRemoteStorageNodeSetOnRemote.Status.Conditions, + StorageNodeSetReadyCondition, + metav1.ConditionTrue, 
+ ) && foundRemoteStorageNodeSetOnRemote.Status.State == StorageNodeSetReady + }, test.Timeout, test.Interval).Should(BeTrue()) }) }) }) diff --git a/internal/controllers/remotestoragenodeset/sync.go b/internal/controllers/remotestoragenodeset/sync.go index 04bcb312..95e4db57 100644 --- a/internal/controllers/remotestoragenodeset/sync.go +++ b/internal/controllers/remotestoragenodeset/sync.go @@ -48,7 +48,7 @@ func (r *Reconciler) handleResourcesSync( result, err := resources.CreateOrUpdateOrMaybeIgnore(ctx, r.Client, newResource, func() error { err := builder.Build(newResource) if err != nil { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteStorageNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", @@ -75,7 +75,7 @@ func (r *Reconciler) handleResourcesSync( newResource.GetName(), ) if err != nil { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteStorageNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", @@ -83,7 +83,7 @@ func (r *Reconciler) handleResourcesSync( ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } else if result == controllerutil.OperationResultCreated || result == controllerutil.OperationResultUpdated { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteStorageNodeSet, corev1.EventTypeNormal, "Provisioning", @@ -108,19 +108,19 @@ func (r *Reconciler) updateStatus( }, &storageNodeSet) if err != nil { if errors.IsNotFound(err) { - r.RemoteRecorder.Event( + r.Recorder.Event( remoteStorageNodeSet, corev1.EventTypeWarning, "ProvisioningFailed", - fmt.Sprintf("StorageNodeSet with name %s was not found on remote: %s", remoteStorageNodeSet.Name, err), + fmt.Sprintf("StorageNodeSet with name %s was not found: %s", remoteStorageNodeSet.Name, err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil } - r.RemoteRecorder.Event( + r.Recorder.Event( remoteStorageNodeSet, corev1.EventTypeWarning, "ControllerError", - fmt.Sprintf("Failed to get StorageNodeSet on remote: %s", err), + fmt.Sprintf("Failed to get 
StorageNodeSet: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } @@ -131,6 +131,12 @@ func (r *Reconciler) updateStatus( err = r.RemoteClient.Status().Update(ctx, remoteStorageNodeSet) if err != nil { + r.Recorder.Event( + remoteStorageNodeSet, + corev1.EventTypeWarning, + "ControllerError", + fmt.Sprintf("Failed setting status on remote cluster: %s", err), + ) r.RemoteRecorder.Event( remoteStorageNodeSet, corev1.EventTypeWarning, @@ -139,11 +145,17 @@ func (r *Reconciler) updateStatus( ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } else if oldStatus != storageNodeSet.Status.State { + r.Recorder.Event( + remoteStorageNodeSet, + corev1.EventTypeNormal, + "StatusChanged", + fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s on remote cluster", oldStatus, remoteStorageNodeSet.Status.State), + ) r.RemoteRecorder.Event( remoteStorageNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s", oldStatus, storageNodeSet.Status.State), + fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s", oldStatus, remoteStorageNodeSet.Status.State), ) } From 10ed733eefc8ce948ef556a9867a31fef4a025ed Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 12 Feb 2024 17:04:12 +0300 Subject: [PATCH 20/24] ut tests for deletion --- .../remotedatabasenodeset/controller.go | 82 +++- .../remotedatabasenodeset/controller_test.go | 388 ++++++++++++++++++ .../remotestoragenodeset/controller.go | 5 + .../remotestoragenodeset/controller_test.go | 104 ++++- 4 files changed, 541 insertions(+), 38 deletions(-) create mode 100644 internal/controllers/remotedatabasenodeset/controller_test.go diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go index babadef9..086f789e 100644 --- a/internal/controllers/remotedatabasenodeset/controller.go +++ b/internal/controllers/remotedatabasenodeset/controller.go @@ -4,7 +4,7 @@ import ( 
"context" "github.com/go-logr/logr" - apierrs "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" @@ -14,6 +14,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -40,19 +41,58 @@ type Reconciler struct { //+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets/status,verbs=get;update;patch //+kubebuilder:rbac:groups=ydb.tech,resources=databasenodesets/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=core,resources=services/finalizers,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=secrets/status,verbs=get;update;patch func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := log.FromContext(ctx) remoteDatabaseNodeSet := &api.RemoteDatabaseNodeSet{} - + // we'll ignore not-found errors, since they can't be fixed by an immediate + // requeue (we'll need to wait for a new notification), and we can get them + // on deleted requests. 
if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteDatabaseNodeSet); err != nil { - if apierrs.IsNotFound(err) { - logger.Info("RemotedatabaseNodeSet has been deleted") - return r.handleRemoteResourceDeleted(ctx, req) + if apierrors.IsNotFound(err) { + logger.Info("DatabaseNodeSet has been deleted") + return ctrl.Result{Requeue: false}, nil } - logger.Error(err, "unable to get RemotedatabaseNodeSet") - return ctrl.Result{}, err + logger.Error(err, "unable to get RemoteDatabaseNodeSet") + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil + } + + // examine DeletionTimestamp to determine if object is under deletion + if remoteDatabaseNodeSet.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // to registering our finalizer. + if !controllerutil.ContainsFinalizer(remoteDatabaseNodeSet, RemoteFinalizerKey) { + controllerutil.AddFinalizer(remoteDatabaseNodeSet, RemoteFinalizerKey) + if err := r.RemoteClient.Update(ctx, remoteDatabaseNodeSet); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + } + } else { + // The object is being deleted + if controllerutil.ContainsFinalizer(remoteDatabaseNodeSet, RemoteFinalizerKey) { + // our finalizer is present, so lets handle any external dependency + if err := r.deleteExternalResources(ctx, remoteDatabaseNodeSet); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried. + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + // remove our finalizer from the list and update it. 
+ controllerutil.RemoveFinalizer(remoteDatabaseNodeSet, RemoteFinalizerKey) + if err := r.RemoteClient.Update(ctx, remoteDatabaseNodeSet); err != nil { + return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + } + + // Stop reconciliation as the item is being deleted + return ctrl.Result{Requeue: false}, nil } result, err := r.Sync(ctx, remoteDatabaseNodeSet) @@ -63,26 +103,28 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return result, err } -func (r *Reconciler) handleRemoteResourceDeleted(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteDatabaseNodeSet *api.RemoteDatabaseNodeSet) error { logger := log.FromContext(ctx) - databaseNodeSet := &api.DatabaseNodeSet{} - - if err := r.Client.Get(ctx, req.NamespacedName, databaseNodeSet); err != nil { - if apierrs.IsNotFound(err) { - logger.Info("databaseNodeSet has been deleted") - return ctrl.Result{Requeue: false}, nil + storageNodeSet := &api.StorageNodeSet{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: remoteDatabaseNodeSet.Name, + Namespace: remoteDatabaseNodeSet.Namespace, + }, storageNodeSet); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("DatabaseNodeSet not found") + return nil } - logger.Error(err, "unable to get databaseNodeSet") - return ctrl.Result{}, err + logger.Error(err, "unable to get DatabaseNodeSet") + return err } - if err := r.Client.Delete(ctx, databaseNodeSet); err != nil { - logger.Error(err, "unable to delete databaseNodeSet") - return ctrl.Result{}, err + if err := r.Client.Delete(ctx, storageNodeSet); err != nil { + logger.Error(err, "unable to delete DatabaseNodeSet") + return err } - return ctrl.Result{Requeue: false}, nil + return nil } // SetupWithManager sets up the controller with the Manager. 
diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go new file mode 100644 index 00000000..0934f383 --- /dev/null +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -0,0 +1,388 @@ +package remotedatabasenodeset_test + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubectl/pkg/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" + . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/databasenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotedatabasenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" +) + +const ( + testRemoteCluster = "remote-cluster" + testNodeSetName = "nodeset" +) + +var ( + localClient client.Client + remoteClient client.Client + localEnv *envtest.Environment + remoteEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestRemoteNodeSetApis(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "RemoteDatabaseNodeSet controller tests") +} + +var _ = BeforeSuite(func() { + By("bootstrapping test environment") + + ctx, cancel = context.WithCancel(context.TODO()) + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + localEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + } + remoteEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + } + + err := api.AddToScheme(scheme.Scheme) + Expect(err).ShouldNot(HaveOccurred()) + + localCfg, err := localEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(localCfg).ToNot(BeNil()) + + remoteCfg, err := remoteEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(remoteCfg).ToNot(BeNil()) + + // +kubebuilder:scaffold:scheme + + localManager, err := ctrl.NewManager(localCfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ShouldNot(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + remoteManager, err := ctrl.NewManager(remoteCfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ShouldNot(HaveOccurred()) + + 
databaseSelector, err := remotedatabasenodeset.BuildRemoteSelector(testRemoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + remoteCluster, err := cluster.New(localCfg, func(o *cluster.Options) { + o.Scheme = scheme.Scheme + o.NewCache = cache.BuilderWithOptions(cache.Options{ + SelectorsByObject: cache.SelectorsByObject{ + &api.RemoteDatabaseNodeSet{}: {Label: databaseSelector}, + }, + }) + }) + Expect(err).ShouldNot(HaveOccurred()) + + err = remoteManager.Add(remoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&database.Reconciler{ + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), + Recorder: localManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(localManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&databasenodeset.Reconciler{ + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), + Recorder: localManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(localManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&databasenodeset.Reconciler{ + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), + Config: remoteManager.GetConfig(), + Recorder: remoteManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(remoteManager) + Expect(err).ShouldNot(HaveOccurred()) + + err = (&remotedatabasenodeset.Reconciler{ + Client: remoteManager.GetClient(), + RemoteClient: localManager.GetClient(), + Scheme: remoteManager.GetScheme(), + RemoteRecorder: remoteManager.GetEventRecorderFor("ydb-operator"), + }).SetupWithManager(remoteManager, &remoteCluster) + Expect(err).ShouldNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = localManager.Start(ctx) + Expect(err).ShouldNot(HaveOccurred()) + }() + + go func() { + defer GinkgoRecover() + err = remoteManager.Start(ctx) + Expect(err).ShouldNot(HaveOccurred()) + }() + + localClient = localManager.GetClient() + 
Expect(localClient).ToNot(BeNil()) + remoteClient = remoteManager.GetClient() + Expect(remoteClient).ToNot(BeNil()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := localEnv.Stop() + Expect(err).ToNot(HaveOccurred()) + err = remoteEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { + var localNamespace corev1.Namespace + var remoteNamespace corev1.Namespace + var storageSample *api.Storage + var databaseSample *api.Database + + BeforeEach(func() { + localNamespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testobjects.YdbNamespace, + }, + } + remoteNamespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testobjects.YdbNamespace, + }, + } + Expect(localClient.Create(ctx, &localNamespace)).Should(Succeed()) + Expect(remoteClient.Create(ctx, &remoteNamespace)).Should(Succeed()) + }) + + AfterEach(func() { + Expect(localClient.Delete(ctx, &localNamespace)).Should(Succeed()) + Expect(remoteClient.Delete(ctx, &remoteNamespace)).Should(Succeed()) + }) + + When("Create database with RemoteDatabaseNodeSet in k8s-mgmt-cluster", func() { + It("Should create databaseNodeSet and sync resources in k8s-data-cluster", func() { + By("issuing create commands...") + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + databaseSample = testobjects.DefaultDatabase() + databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ + Name: testNodeSetName + "-local", + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }) + databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }) + Expect(localClient.Create(ctx, 
storageSample)).Should(Succeed()) + Expect(localClient.Create(ctx, databaseSample)).Should(Succeed()) + + By("checking that Storage created on local cluster...") + Eventually(func() error { + foundStorage := api.Storage{} + + return localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundStorage) + + }, test.Timeout, test.Interval).ShouldNot(HaveOccurred()) + + By("checking that databaseNodeSet created on local cluster...") + Eventually(func() bool { + foundDatabaseNodeSet := api.DatabaseNodeSetList{} + + Expect(localClient.List(ctx, &foundDatabaseNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundDatabaseNodeSet.Items { + if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-local" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that RemoteDatabaseNodeSet created on local cluster...") + Eventually(func() bool { + foundRemoteDatabaseNodeSet := api.RemoteDatabaseNodeSetList{} + + Expect(localClient.List(ctx, &foundRemoteDatabaseNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundRemoteDatabaseNodeSet.Items { + if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that databaseNodeSet created on remote cluster...") + Eventually(func() bool { + founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + + Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range founddatabaseNodeSetOnRemote.Items { + if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("Set databaseNodeSet status to 
Ready on remote cluster...") + founddatabaseNodeSetOnRemote := api.DatabaseNodeSet{} + Expect(remoteClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &founddatabaseNodeSetOnRemote)).Should(Succeed()) + + founddatabaseNodeSetOnRemote.Status.State = DatabaseNodeSetReady + founddatabaseNodeSetOnRemote.Status.Conditions = append( + founddatabaseNodeSetOnRemote.Status.Conditions, + metav1.Condition{ + Type: DatabaseNodeSetReadyCondition, + Status: "True", + Reason: ReasonCompleted, + Message: fmt.Sprintf("Scaled databaseNodeSet to %d successfully", founddatabaseNodeSetOnRemote.Spec.Nodes), + }, + ) + Expect(remoteClient.Status().Update(ctx, &founddatabaseNodeSetOnRemote)).Should(Succeed()) + + By("checking that RemoteDatabaseNodeSet status updated on local cluster...") + Eventually(func() bool { + foundRemoteDatabaseNodeSetOnRemote := api.RemoteDatabaseNodeSet{} + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundRemoteDatabaseNodeSetOnRemote)).Should(Succeed()) + + return meta.IsStatusConditionPresentAndEqual( + foundRemoteDatabaseNodeSetOnRemote.Status.Conditions, + DatabaseNodeSetReadyCondition, + metav1.ConditionTrue, + ) && foundRemoteDatabaseNodeSetOnRemote.Status.State == DatabaseNodeSetReady + }, test.Timeout, test.Interval).Should(BeTrue()) + }) + }) + When("Delete database with RemoteDatabaseNodeSet in k8s-mgmt-cluster", func() { + It("Should delete all resources in k8s-data-cluster", func() { + By("issuing create commands...") + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + databaseSample = testobjects.DefaultDatabase() + databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + 
Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }) + Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + Expect(localClient.Create(ctx, databaseSample)).Should(Succeed()) + + By("checking that Storage created on local cluster...") + Eventually(func() error { + foundStorage := api.Storage{} + + return localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundStorage) + + }, test.Timeout, test.Interval).ShouldNot(HaveOccurred()) + + By("checking that DatabaseNodeSet created on remote cluster...") + Eventually(func() bool { + founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + + Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range founddatabaseNodeSetOnRemote.Items { + if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("Delete Database on local cluster...") + founddatabase := api.Database{} + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &founddatabase)).Should(Succeed()) + + Expect(localClient.Delete(ctx, &founddatabase)) + + By("checking that DatabaseNodeSets deleted from remote cluster...") + Eventually(func() bool { + founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + + Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + return len(founddatabaseNodeSetOnRemote.Items) == 0 + }, test.Timeout, test.Interval).Should(BeTrue()) + + By("checking that Database deleted from local cluster...") + Eventually(func() bool { + founddatabases := api.DatabaseList{} + + Expect(remoteClient.List(ctx, &founddatabases, client.InNamespace( + 
testobjects.YdbNamespace, + ))).Should(Succeed()) + + return len(founddatabases.Items) == 0 + }, test.Timeout, test.Interval).Should(BeTrue()) + }) + }) +}) diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go index e7e72c72..533a22f3 100644 --- a/internal/controllers/remotestoragenodeset/controller.go +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -41,6 +41,11 @@ type Reconciler struct { //+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets/status,verbs=get;update;patch //+kubebuilder:rbac:groups=ydb.tech,resources=storagenodesets/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=core,resources=services/finalizers,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=core,resources=secrets/status,verbs=get;update;patch func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := log.FromContext(ctx) diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index 1e996c7a..ca757d83 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -2,6 +2,7 @@ package remotestoragenodeset_test import ( "context" + "fmt" "path/filepath" "testing" @@ -191,7 +192,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { }) When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { - It("Should create StorageNodeSet in k8s-data-cluster", func() { + It("Should 
create StorageNodeSet and sync resources in k8s-data-cluster", func() { By("issuing create commands...") storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ @@ -245,13 +246,13 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { By("checking that StorageNodeSet created on remote cluster...") Eventually(func() bool { - foundStorageNodeSetInRemote := api.StorageNodeSetList{} + foundStorageNodeSetOnRemote := api.StorageNodeSetList{} - Expect(remoteClient.List(ctx, &foundStorageNodeSetInRemote, client.InNamespace( + Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( testobjects.YdbNamespace, ))).Should(Succeed()) - for _, nodeset := range foundStorageNodeSetInRemote.Items { + for _, nodeset := range foundStorageNodeSetOnRemote.Items { if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { return true } @@ -259,20 +260,24 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { return false }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that StorageNodeSet status is Ready on remote cluster...") - Eventually(func() bool { - foundStorageNodeSetOnRemote := api.StorageNodeSet{} - Expect(remoteClient.Get(ctx, types.NamespacedName{ - Name: storageSample.Name + "-" + testNodeSetName + "-remote", - Namespace: testobjects.YdbNamespace, - }, &foundStorageNodeSetOnRemote)).Should(Succeed()) - - return meta.IsStatusConditionPresentAndEqual( - foundStorageNodeSetOnRemote.Status.Conditions, - StorageNodeSetReadyCondition, - metav1.ConditionTrue, - ) && foundStorageNodeSetOnRemote.Status.State == StorageNodeSetReady - }, test.Timeout, test.Interval).Should(BeTrue()) + By("Set StorageNodeSet status to Ready on remote cluster...") + foundStorageNodeSetOnRemote := api.StorageNodeSet{} + Expect(remoteClient.Get(ctx, 
types.NamespacedName{ + Name: storageSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundStorageNodeSetOnRemote)).Should(Succeed()) + + foundStorageNodeSetOnRemote.Status.State = StorageNodeSetReady + foundStorageNodeSetOnRemote.Status.Conditions = append( + foundStorageNodeSetOnRemote.Status.Conditions, + metav1.Condition{ + Type: StorageNodeSetReadyCondition, + Status: "True", + Reason: ReasonCompleted, + Message: fmt.Sprintf("Scaled StorageNodeSet to %d successfully", foundStorageNodeSetOnRemote.Spec.Nodes), + }, + ) + Expect(remoteClient.Status().Update(ctx, &foundStorageNodeSetOnRemote)).Should(Succeed()) By("checking that RemoteStorageNodeSet status updated on local cluster...") Eventually(func() bool { @@ -290,4 +295,67 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { }, test.Timeout, test.Interval).Should(BeTrue()) }) }) + When("Delete Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { + It("Should delete all resources in k8s-data-cluster", func() { + By("issuing create commands...") + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 8, + }, + }) + Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + + By("checking that StorageNodeSet created on remote cluster...") + Eventually(func() bool { + foundStorageNodeSetOnRemote := api.StorageNodeSetList{} + + Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundStorageNodeSetOnRemote.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + 
return false
+			}, test.Timeout, test.Interval).Should(BeTrue())
+
+			By("Delete Storage on local cluster...")
+			foundStorage := api.Storage{}
+			Expect(localClient.Get(ctx, types.NamespacedName{
+				Name:      storageSample.Name,
+				Namespace: testobjects.YdbNamespace,
+			}, &foundStorage)).Should(Succeed())
+
+			Expect(localClient.Delete(ctx, &foundStorage)).Should(Succeed())
+
+			By("checking that StorageNodeSets deleted from remote cluster...")
+			Eventually(func() bool {
+				foundStorageNodeSetOnRemote := api.StorageNodeSetList{}
+
+				Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace(
+					testobjects.YdbNamespace,
+				))).Should(Succeed())
+
+				return len(foundStorageNodeSetOnRemote.Items) == 0
+			}, test.Timeout, test.Interval).Should(BeTrue())
+
+			By("checking that Storage deleted from local cluster...")
+			Eventually(func() bool {
+				foundStorages := api.StorageList{}
+
+				Expect(localClient.List(ctx, &foundStorages, client.InNamespace(
+					testobjects.YdbNamespace,
+				))).Should(Succeed())
+
+				return len(foundStorages.Items) == 0
+			}, test.Timeout, test.Interval).Should(BeTrue())
+		})
+	})
 })
From a9c9fbcff03b6ee4b1404bf09e3a0b67af4dd2a4 Mon Sep 17 00:00:00 2001
From: Aleksei Kobzev
Date: Mon, 12 Feb 2024 17:05:35 +0300
Subject: [PATCH 21/24] fix golangci lint errors

---
 internal/controllers/remotedatabasenodeset/controller.go | 1 +
 internal/controllers/remotestoragenodeset/controller.go  | 1 +
 2 files changed, 2 insertions(+)

diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go
index 086f789e..3ca75ba9 100644
--- a/internal/controllers/remotedatabasenodeset/controller.go
+++ b/internal/controllers/remotedatabasenodeset/controller.go
@@ -63,6 +63,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil
 	}
 
+	//nolint:nestif
 	// examine DeletionTimestamp to determine if object is under deletion
 	if 
remoteDatabaseNodeSet.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go index 533a22f3..9fca2d10 100644 --- a/internal/controllers/remotestoragenodeset/controller.go +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -63,6 +63,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil } + //nolint:nestif // examine DeletionTimestamp to determine if object is under deletion if remoteStorageNodeSet.ObjectMeta.DeletionTimestamp.IsZero() { // The object is not being deleted, so if it does not have our finalizer, From 38552605b4ca7023ead49fcb77e69efa471bf170 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Mon, 12 Feb 2024 17:33:02 +0300 Subject: [PATCH 22/24] fix ut tests --- .../remotedatabasenodeset/controller_test.go | 12 ++++- .../remotestoragenodeset/controller_test.go | 47 ++++++++++--------- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go index 0934f383..bc49a90a 100644 --- a/internal/controllers/remotedatabasenodeset/controller_test.go +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "runtime" "testing" . 
"github.com/onsi/ginkgo/v2" @@ -57,11 +58,18 @@ var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + _, curfile, _, _ := runtime.Caller(0) //nolint:dogsled localEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + CRDDirectoryPaths: []string{ + filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + }, + ErrorIfCRDPathMissing: true, } remoteEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + CRDDirectoryPaths: []string{ + filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + }, + ErrorIfCRDPathMissing: true, } err := api.AddToScheme(scheme.Scheme) diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index ca757d83..b322ad25 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "runtime" "testing" . 
"github.com/onsi/ginkgo/v2" @@ -57,15 +58,23 @@ var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + _, curfile, _, _ := runtime.Caller(0) //nolint:dogsled localEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + CRDDirectoryPaths: []string{ + filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + }, + ErrorIfCRDPathMissing: true, } remoteEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")}, + CRDDirectoryPaths: []string{ + filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + }, + ErrorIfCRDPathMissing: true, } err := api.AddToScheme(scheme.Scheme) - Expect(err).ShouldNot(HaveOccurred()) + Expect(err).ToNot(HaveOccurred()) + // +kubebuilder:scaffold:scheme localCfg, err := localEnv.Start() Expect(err).ToNot(HaveOccurred()) @@ -75,16 +84,12 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(remoteCfg).ToNot(BeNil()) - // +kubebuilder:scaffold:scheme - localManager, err := ctrl.NewManager(localCfg, ctrl.Options{ MetricsBindAddress: "0", Scheme: scheme.Scheme, }) Expect(err).ShouldNot(HaveOccurred()) - //+kubebuilder:scaffold:scheme - remoteManager, err := ctrl.NewManager(remoteCfg, ctrl.Options{ MetricsBindAddress: "0", Scheme: scheme.Scheme, @@ -108,34 +113,30 @@ var _ = BeforeSuite(func() { Expect(err).ShouldNot(HaveOccurred()) err = (&storage.Reconciler{ - Client: localManager.GetClient(), - Scheme: localManager.GetScheme(), - Config: localManager.GetConfig(), - Recorder: localManager.GetEventRecorderFor("ydb-operator"), + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), }).SetupWithManager(localManager) Expect(err).ShouldNot(HaveOccurred()) err = (&storagenodeset.Reconciler{ - Client: localManager.GetClient(), - 
Scheme: localManager.GetScheme(), - Config: localManager.GetConfig(), - Recorder: localManager.GetEventRecorderFor("ydb-operator"), + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), }).SetupWithManager(localManager) Expect(err).ShouldNot(HaveOccurred()) err = (&storagenodeset.Reconciler{ - Client: remoteManager.GetClient(), - Scheme: remoteManager.GetScheme(), - Config: remoteManager.GetConfig(), - Recorder: remoteManager.GetEventRecorderFor("ydb-operator"), + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), + Config: remoteManager.GetConfig(), }).SetupWithManager(remoteManager) Expect(err).ShouldNot(HaveOccurred()) err = (&remotestoragenodeset.Reconciler{ - Client: remoteManager.GetClient(), - RemoteClient: localManager.GetClient(), - Scheme: remoteManager.GetScheme(), - RemoteRecorder: remoteManager.GetEventRecorderFor("ydb-operator"), + Client: remoteManager.GetClient(), + RemoteClient: localManager.GetClient(), + Scheme: remoteManager.GetScheme(), }).SetupWithManager(remoteManager, &remoteCluster) Expect(err).ShouldNot(HaveOccurred()) From e5b3c3f1906646e00c12da4818f57038e0d17d18 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 13 Feb 2024 13:53:23 +0300 Subject: [PATCH 23/24] update unit-tests --- Makefile | 9 +- internal/controllers/constants/constants.go | 1 + internal/controllers/database/init.go | 2 +- internal/controllers/database/sync.go | 12 +- .../controllers/databasenodeset/controller.go | 5 +- .../remotedatabasenodeset/controller.go | 17 +- .../remotedatabasenodeset/controller_test.go | 356 ++++++++++++------ .../controllers/remotedatabasenodeset/sync.go | 57 +-- .../remotestoragenodeset/controller_test.go | 257 +++++++++---- .../controllers/remotestoragenodeset/sync.go | 57 +-- internal/controllers/storage/sync.go | 4 +- internal/resources/database.go | 3 +- internal/resources/remotedatabasenodeset.go | 2 +- 13 files changed, 524 insertions(+), 258 deletions(-) 
diff --git a/Makefile b/Makefile
index e21076d9..4fa573ec 100644
--- a/Makefile
+++ b/Makefile
@@ -77,9 +77,14 @@ kind-load:
 	docker tag cr.yandex/yc/ydb-operator:latest kind/ydb-operator:current
 	kind load docker-image kind/ydb-operator:current --name kind-ydb-operator
 
+unit-test: manifests generate fmt vet envtest ## Run unit tests
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -v -timeout 1800s -p 1 ./internal/controllers/... -ginkgo.vv -coverprofile cover.out
+
+e2e-test: docker-build kind-init kind-load ## Run e2e tests
+	go test -v -timeout 1800s -p 1 ./e2e/... -args -ginkgo.vv
+
 .PHONY: test
-test: manifests generate fmt vet docker-build kind-init kind-load envtest ## Run tests.
-	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -timeout 1800s -p 1 ./... -ginkgo.v -coverprofile cover.out
+test: unit-test e2e-test ## Run all tests
 
 .PHONY: clean
 clean:
diff --git a/internal/controllers/constants/constants.go b/internal/controllers/constants/constants.go
index 47c52e85..21c654e9 100644
--- a/internal/controllers/constants/constants.go
+++ b/internal/controllers/constants/constants.go
@@ -28,6 +28,7 @@ const (
 	StorageInitializationRequeueDelay = 5 * time.Second
 
 	DatabasePending      ClusterState = "Pending"
+	DatabasePreparing    ClusterState = "Preparing"
 	DatabaseProvisioning ClusterState = "Provisioning"
 	DatabaseInitializing ClusterState = "Initializing"
 	DatabaseReady        ClusterState = "Ready"
diff --git a/internal/controllers/database/init.go b/internal/controllers/database/init.go
index ab2cd0a7..79ed787c 100644
--- a/internal/controllers/database/init.go
+++ b/internal/controllers/database/init.go
@@ -64,7 +64,7 @@ func (r *Reconciler) setInitialStatus(
 		changed = true
 	}
 	if database.Status.State == DatabasePending {
-		database.Status.State = DatabaseInitializing
+		database.Status.State = DatabasePreparing
 		changed = true
 	}
 	if changed {
diff --git a/internal/controllers/database/sync.go 
b/internal/controllers/database/sync.go index 65c2fbbe..1e74c21e 100644 --- a/internal/controllers/database/sync.go +++ b/internal/controllers/database/sync.go @@ -132,7 +132,7 @@ func (r *Reconciler) waitForDatabaseNodeSetsToReady( ) (bool, ctrl.Result, error) { r.Log.Info("running step waitForDatabaseNodeSetToReady for Database") - if database.Status.State == DatabasePending { + if database.Status.State == DatabasePreparing { r.Recorder.Event( database, corev1.EventTypeNormal, @@ -194,7 +194,7 @@ func (r *Reconciler) waitForStatefulSetToScale( ) (bool, ctrl.Result, error) { r.Log.Info("running step waitForStatefulSetToScale for Database") - if database.Status.State == DatabasePending { + if database.Status.State == DatabasePreparing { r.Recorder.Event( database, corev1.EventTypeNormal, @@ -375,6 +375,7 @@ func (r *Reconciler) setState( return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } + oldStatus := databaseCr.Status.State databaseCr.Status.State = database.Status.State databaseCr.Status.Conditions = database.Status.Conditions @@ -387,6 +388,13 @@ func (r *Reconciler) setState( fmt.Sprintf("failed setting status: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } else if oldStatus != databaseCr.Status.State { + r.Recorder.Event( + databaseCr, + corev1.EventTypeNormal, + "StatusChanged", + fmt.Sprintf("Database moved from %s to %s", oldStatus, databaseCr.Status.State), + ) } return Stop, ctrl.Result{RequeueAfter: StatusUpdateRequeueDelay}, nil diff --git a/internal/controllers/databasenodeset/controller.go b/internal/controllers/databasenodeset/controller.go index 9e6858c4..018ce67f 100644 --- a/internal/controllers/databasenodeset/controller.go +++ b/internal/controllers/databasenodeset/controller.go @@ -6,7 +6,7 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -48,13 +48,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu crDatabaseNodeSet := &api.DatabaseNodeSet{} err := r.Get(ctx, req.NamespacedName, crDatabaseNodeSet) if err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { logger.Info("DatabaseNodeSet has been deleted") return ctrl.Result{Requeue: false}, nil } logger.Error(err, "unable to get DatabaseNodeSet") return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } + result, err := r.Sync(ctx, crDatabaseNodeSet) if err != nil { r.Log.Error(err, "unexpected Sync error") diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go index 3ca75ba9..a5199c04 100644 --- a/internal/controllers/remotedatabasenodeset/controller.go +++ b/internal/controllers/remotedatabasenodeset/controller.go @@ -56,10 +56,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // on deleted requests. 
if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteDatabaseNodeSet); err != nil { if apierrors.IsNotFound(err) { - logger.Info("DatabaseNodeSet has been deleted") + logger.Info("RemoteDatabaseNodeSet has been deleted on remote cluster") return ctrl.Result{Requeue: false}, nil } - logger.Error(err, "unable to get RemoteDatabaseNodeSet") + logger.Error(err, "unable to get RemoteDatabaseNodeSet on remote cluster") return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil } @@ -79,7 +79,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // The object is being deleted if controllerutil.ContainsFinalizer(remoteDatabaseNodeSet, RemoteFinalizerKey) { // our finalizer is present, so lets handle any external dependency - if err := r.deleteExternalResources(ctx, remoteDatabaseNodeSet); err != nil { + if err := r.deleteExternalResources(ctx, req.NamespacedName); err != nil { // if fail to delete the external dependency here, return with error // so that it can be retried. 
return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err @@ -104,14 +104,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return result, err } -func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteDatabaseNodeSet *api.RemoteDatabaseNodeSet) error { +func (r *Reconciler) deleteExternalResources(ctx context.Context, key types.NamespacedName) error { logger := log.FromContext(ctx) - storageNodeSet := &api.StorageNodeSet{} - if err := r.Client.Get(ctx, types.NamespacedName{ - Name: remoteDatabaseNodeSet.Name, - Namespace: remoteDatabaseNodeSet.Namespace, - }, storageNodeSet); err != nil { + databaseNodeSet := &api.DatabaseNodeSet{} + if err := r.Client.Get(ctx, key, databaseNodeSet); err != nil { if apierrors.IsNotFound(err) { logger.Info("DatabaseNodeSet not found") return nil @@ -120,7 +117,7 @@ func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteDatabase return err } - if err := r.Client.Delete(ctx, storageNodeSet); err != nil { + if err := r.Client.Delete(ctx, databaseNodeSet); err != nil { logger.Error(err, "unable to delete DatabaseNodeSet") return err } diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go index bc49a90a..3cf49803 100644 --- a/internal/controllers/remotedatabasenodeset/controller_test.go +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -4,15 +4,20 @@ import ( "context" "fmt" "path/filepath" - "runtime" + "strings" "testing" + "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "k8s.io/kubectl/pkg/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -28,6 +33,7 @@ import ( "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/databasenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotedatabasenodeset" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" ) @@ -45,7 +51,7 @@ var ( cancel context.CancelFunc ) -func TestRemoteNodeSetApis(t *testing.T) { +func TestRemoteDatabaseNodeSetApis(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "RemoteDatabaseNodeSet controller tests") @@ -58,16 +64,15 @@ var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - _, curfile, _, _ := runtime.Caller(0) //nolint:dogsled localEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds"), }, ErrorIfCRDPathMissing: true, } remoteEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds"), }, ErrorIfCRDPathMissing: true, } @@ -115,35 +120,38 @@ var _ = BeforeSuite(func() { err = remoteManager.Add(remoteCluster) Expect(err).ShouldNot(HaveOccurred()) + err = (&storage.Reconciler{ + Client: 
localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), + }).SetupWithManager(localManager) + Expect(err).ShouldNot(HaveOccurred()) + err = (&database.Reconciler{ - Client: localManager.GetClient(), - Scheme: localManager.GetScheme(), - Config: localManager.GetConfig(), - Recorder: localManager.GetEventRecorderFor("ydb-operator"), + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), }).SetupWithManager(localManager) Expect(err).ShouldNot(HaveOccurred()) err = (&databasenodeset.Reconciler{ - Client: localManager.GetClient(), - Scheme: localManager.GetScheme(), - Config: localManager.GetConfig(), - Recorder: localManager.GetEventRecorderFor("ydb-operator"), + Client: localManager.GetClient(), + Scheme: localManager.GetScheme(), + Config: localManager.GetConfig(), }).SetupWithManager(localManager) Expect(err).ShouldNot(HaveOccurred()) err = (&databasenodeset.Reconciler{ - Client: remoteManager.GetClient(), - Scheme: remoteManager.GetScheme(), - Config: remoteManager.GetConfig(), - Recorder: remoteManager.GetEventRecorderFor("ydb-operator"), + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), + Config: remoteManager.GetConfig(), }).SetupWithManager(remoteManager) Expect(err).ShouldNot(HaveOccurred()) err = (&remotedatabasenodeset.Reconciler{ - Client: remoteManager.GetClient(), - RemoteClient: localManager.GetClient(), - Scheme: remoteManager.GetScheme(), - RemoteRecorder: remoteManager.GetEventRecorderFor("ydb-operator"), + Client: remoteManager.GetClient(), + RemoteClient: localManager.GetClient(), + Scheme: remoteManager.GetScheme(), }).SetupWithManager(remoteManager, &remoteCluster) Expect(err).ShouldNot(HaveOccurred()) @@ -181,6 +189,25 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { var databaseSample *api.Database BeforeEach(func() { + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", 
"tests", "data", "storage-block-4-2-config.yaml")) + databaseSample = testobjects.DefaultDatabase() + databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ + Name: testNodeSetName + "-local", + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }) + databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }) + + By("issuing create Namespace commands...") localNamespace = corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: testobjects.YdbNamespace, @@ -193,48 +220,43 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { } Expect(localClient.Create(ctx, &localNamespace)).Should(Succeed()) Expect(remoteClient.Create(ctx, &remoteNamespace)).Should(Succeed()) + + By("issuing create Storage commands...") + Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + By("checking that Storage created on local cluster...") + foundStorage := api.Storage{} + Eventually(func() bool { + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundStorage)) + return foundStorage.Status.State == StorageProvisioning + }, test.Timeout, test.Interval).Should(BeTrue()) + By("set status Ready to Storage...") + foundStorage.Status.State = StorageReady + Expect(localClient.Status().Update(ctx, &foundStorage)).Should(Succeed()) + + By("issuing create Database commands...") + Expect(localClient.Create(ctx, databaseSample)).Should(Succeed()) + By("checking that Database created on local cluster...") + foundDatabase := api.Database{} + Eventually(func() bool { + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundDatabase)) + return foundDatabase.Status.State == 
DatabaseProvisioning + }, test.Timeout, test.Interval).Should(BeTrue()) }) AfterEach(func() { - Expect(localClient.Delete(ctx, &localNamespace)).Should(Succeed()) - Expect(remoteClient.Delete(ctx, &remoteNamespace)).Should(Succeed()) + deleteAll(localEnv, localClient, &localNamespace) + deleteAll(remoteEnv, remoteClient, &localNamespace) }) - When("Create database with RemoteDatabaseNodeSet in k8s-mgmt-cluster", func() { - It("Should create databaseNodeSet and sync resources in k8s-data-cluster", func() { - By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) - databaseSample = testobjects.DefaultDatabase() - databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ - Name: testNodeSetName + "-local", - DatabaseNodeSpec: api.DatabaseNodeSpec{ - Nodes: 4, - }, - }) - databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ - Name: testNodeSetName + "-remote", - Remote: &api.RemoteSpec{ - Cluster: testRemoteCluster, - }, - DatabaseNodeSpec: api.DatabaseNodeSpec{ - Nodes: 4, - }, - }) - Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) - Expect(localClient.Create(ctx, databaseSample)).Should(Succeed()) - - By("checking that Storage created on local cluster...") - Eventually(func() error { - foundStorage := api.Storage{} - - return localClient.Get(ctx, types.NamespacedName{ - Name: storageSample.Name, - Namespace: testobjects.YdbNamespace, - }, &foundStorage) - - }, test.Timeout, test.Interval).ShouldNot(HaveOccurred()) - - By("checking that databaseNodeSet created on local cluster...") + When("Create Database with RemoteDatabaseNodeSet in k8s-mgmt-cluster", func() { + It("Should create DatabaseNodeSet and sync resources in k8s-data-cluster", func() { + By("checking that DatabaseNodeSet created on local cluster...") Eventually(func() bool { 
foundDatabaseNodeSet := api.DatabaseNodeSetList{} @@ -266,15 +288,15 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { return false }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that databaseNodeSet created on remote cluster...") + By("checking that DatabaseNodeSet created on remote cluster...") Eventually(func() bool { - founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + foundDatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} - Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( + Expect(remoteClient.List(ctx, &foundDatabaseNodeSetOnRemote, client.InNamespace( testobjects.YdbNamespace, ))).Should(Succeed()) - for _, nodeset := range founddatabaseNodeSetOnRemote.Items { + for _, nodeset := range foundDatabaseNodeSetOnRemote.Items { if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { return true } @@ -282,24 +304,25 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { return false }, test.Timeout, test.Interval).Should(BeTrue()) - By("Set databaseNodeSet status to Ready on remote cluster...") - founddatabaseNodeSetOnRemote := api.DatabaseNodeSet{} + By("set DatabaseNodeSet status to Ready on remote cluster...") + foundDatabaseNodeSetOnRemote := api.DatabaseNodeSet{} Expect(remoteClient.Get(ctx, types.NamespacedName{ Name: databaseSample.Name + "-" + testNodeSetName + "-remote", Namespace: testobjects.YdbNamespace, - }, &founddatabaseNodeSetOnRemote)).Should(Succeed()) + }, &foundDatabaseNodeSetOnRemote)).Should(Succeed()) - founddatabaseNodeSetOnRemote.Status.State = DatabaseNodeSetReady - founddatabaseNodeSetOnRemote.Status.Conditions = append( - founddatabaseNodeSetOnRemote.Status.Conditions, + foundDatabaseNodeSetOnRemote.Status.State = DatabaseNodeSetReady + foundDatabaseNodeSetOnRemote.Status.Conditions = append( + foundDatabaseNodeSetOnRemote.Status.Conditions, metav1.Condition{ - Type: DatabaseNodeSetReadyCondition, - Status: "True", - Reason: 
ReasonCompleted, - Message: fmt.Sprintf("Scaled databaseNodeSet to %d successfully", founddatabaseNodeSetOnRemote.Spec.Nodes), + Type: DatabaseNodeSetReadyCondition, + Status: "True", + Reason: ReasonCompleted, + LastTransitionTime: metav1.NewTime(time.Now()), + Message: fmt.Sprintf("Scaled databaseNodeSet to %d successfully", foundDatabaseNodeSetOnRemote.Spec.Nodes), }, ) - Expect(remoteClient.Status().Update(ctx, &founddatabaseNodeSetOnRemote)).Should(Succeed()) + Expect(remoteClient.Status().Update(ctx, &foundDatabaseNodeSetOnRemote)).Should(Succeed()) By("checking that RemoteDatabaseNodeSet status updated on local cluster...") Eventually(func() bool { @@ -319,41 +342,31 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { }) When("Delete database with RemoteDatabaseNodeSet in k8s-mgmt-cluster", func() { It("Should delete all resources in k8s-data-cluster", func() { - By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) - databaseSample = testobjects.DefaultDatabase() - databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, api.DatabaseNodeSetSpecInline{ - Name: testNodeSetName + "-remote", - Remote: &api.RemoteSpec{ - Cluster: testRemoteCluster, - }, - DatabaseNodeSpec: api.DatabaseNodeSpec{ - Nodes: 4, - }, - }) - Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) - Expect(localClient.Create(ctx, databaseSample)).Should(Succeed()) - - By("checking that Storage created on local cluster...") - Eventually(func() error { - foundStorage := api.Storage{} + By("checking that RemoteDatabaseNodeSet created on local cluster...") + Eventually(func() bool { + foundRemoteDatabaseNodeSet := api.RemoteDatabaseNodeSetList{} - return localClient.Get(ctx, types.NamespacedName{ - Name: storageSample.Name, - Namespace: testobjects.YdbNamespace, - }, &foundStorage) + Expect(localClient.List(ctx, &foundRemoteDatabaseNodeSet, 
client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) - }, test.Timeout, test.Interval).ShouldNot(HaveOccurred()) + for _, nodeset := range foundRemoteDatabaseNodeSet.Items { + if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) By("checking that DatabaseNodeSet created on remote cluster...") Eventually(func() bool { - founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + foundDatabaseNodeSet := api.DatabaseNodeSetList{} - Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( + Expect(remoteClient.List(ctx, &foundDatabaseNodeSet, client.InNamespace( testobjects.YdbNamespace, ))).Should(Succeed()) - for _, nodeset := range founddatabaseNodeSetOnRemote.Items { + for _, nodeset := range foundDatabaseNodeSet.Items { if nodeset.Name == databaseSample.Name+"-"+testNodeSetName+"-remote" { return true } @@ -361,36 +374,141 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { return false }, test.Timeout, test.Interval).Should(BeTrue()) - By("Delete Database on local cluster...") - founddatabase := api.Database{} - Expect(localClient.Get(ctx, types.NamespacedName{ - Name: databaseSample.Name, - Namespace: testobjects.YdbNamespace, - }, &founddatabase)).Should(Succeed()) - - Expect(localClient.Delete(ctx, &founddatabase)) + By("delete RemoteDatabaseNodeSet on local cluster...") + Eventually(func() error { + foundDatabase := api.Database{} + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundDatabase)).Should(Succeed()) + foundDatabase.Spec.NodeSets = []api.DatabaseNodeSetSpecInline{ + { + Name: testNodeSetName + "-local", + DatabaseNodeSpec: api.DatabaseNodeSpec{ + Nodes: 4, + }, + }, + } + return localClient.Update(ctx, &foundDatabase) + }, test.Timeout, test.Interval).Should(Succeed()) - By("checking that DatabaseNodeSets 
deleted from remote cluster...") + By("checking that DatabaseNodeSet deleted from remote cluster...") Eventually(func() bool { - founddatabaseNodeSetOnRemote := api.DatabaseNodeSetList{} + foundDatabaseNodeSetOnRemote := api.DatabaseNodeSet{} - Expect(remoteClient.List(ctx, &founddatabaseNodeSetOnRemote, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + err := remoteClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundDatabaseNodeSetOnRemote) - return len(founddatabaseNodeSetOnRemote.Items) == 0 + return apierrors.IsNotFound(err) }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that Database deleted from local cluster...") + By("checking that RemoteDatabaseNodeSet deleted from local cluster...") Eventually(func() bool { - founddatabases := api.DatabaseList{} + foundRemoteDatabaseNodeSet := api.RemoteDatabaseNodeSet{} - Expect(remoteClient.List(ctx, &founddatabases, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + err := localClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundRemoteDatabaseNodeSet) - return len(founddatabases.Items) == 0 + return apierrors.IsNotFound(err) }, test.Timeout, test.Interval).Should(BeTrue()) }) }) }) + +func deleteAll(env *envtest.Environment, k8sClient client.Client, objs ...client.Object) { + for _, obj := range objs { + ctx := context.Background() + clientGo, err := kubernetes.NewForConfig(env.Config) + Expect(err).ShouldNot(HaveOccurred()) + Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, obj))).Should(Succeed()) + + if ns, ok := obj.(*corev1.Namespace); ok { + // Normally the kube-controller-manager would handle finalization + // and garbage collection of namespaces, but with envtest, we aren't + // running a kube-controller-manager. 
Instead we're gonna approximate + // (poorly) the kube-controller-manager by explicitly deleting some + // resources within the namespace and then removing the `kubernetes` + // finalizer from the namespace resource so it can finish deleting. + // Note that any resources within the namespace that we don't + // successfully delete could reappear if the namespace is ever + // recreated with the same name. + + // Look up all namespaced resources under the discovery API + _, apiResources, err := clientGo.Discovery().ServerGroupsAndResources() + Expect(err).ShouldNot(HaveOccurred()) + namespacedGVKs := make(map[string]schema.GroupVersionKind) + for _, apiResourceList := range apiResources { + defaultGV, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + Expect(err).ShouldNot(HaveOccurred()) + for _, r := range apiResourceList.APIResources { + if !r.Namespaced || strings.Contains(r.Name, "/") { + // skip non-namespaced and subresources + continue + } + gvk := schema.GroupVersionKind{ + Group: defaultGV.Group, + Version: defaultGV.Version, + Kind: r.Kind, + } + if r.Group != "" { + gvk.Group = r.Group + } + if r.Version != "" { + gvk.Version = r.Version + } + namespacedGVKs[gvk.String()] = gvk + } + } + + // Delete all namespaced resources in this namespace + for _, gvk := range namespacedGVKs { + var u unstructured.Unstructured + u.SetGroupVersionKind(gvk) + err := k8sClient.DeleteAllOf(ctx, &u, client.InNamespace(ns.Name)) + Expect(client.IgnoreNotFound(ignoreMethodNotAllowed(err))).ShouldNot(HaveOccurred()) + } + + Eventually(func() error { + key := client.ObjectKeyFromObject(ns) + if err := k8sClient.Get(ctx, key, ns); err != nil { + return client.IgnoreNotFound(err) + } + // remove `kubernetes` finalizer + const kubernetes = "kubernetes" + finalizers := []corev1.FinalizerName{} + for _, f := range ns.Spec.Finalizers { + if f != kubernetes { + finalizers = append(finalizers, f) + } + } + ns.Spec.Finalizers = finalizers + + // We have to use the 
k8s.io/client-go library here to expose + // ability to patch the /finalize subresource on the namespace + _, err = clientGo.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) + return err + }, test.Timeout, test.Interval).Should(Succeed()) + } + + Eventually(func() metav1.StatusReason { + key := client.ObjectKeyFromObject(obj) + if err := k8sClient.Get(ctx, key, obj); err != nil { + return apierrors.ReasonForError(err) + } + return "" + }, test.Timeout, test.Interval).Should(Equal(metav1.StatusReasonNotFound)) + } +} + +func ignoreMethodNotAllowed(err error) error { + if err != nil { + if apierrors.ReasonForError(err) == metav1.StatusReasonMethodNotAllowed { + return nil + } + } + return err +} diff --git a/internal/controllers/remotedatabasenodeset/sync.go b/internal/controllers/remotedatabasenodeset/sync.go index 31ffa5cb..68797e4e 100644 --- a/internal/controllers/remotedatabasenodeset/sync.go +++ b/internal/controllers/remotedatabasenodeset/sync.go @@ -6,7 +6,6 @@ import ( "reflect" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -101,61 +100,67 @@ func (r *Reconciler) updateStatus( ) (bool, ctrl.Result, error) { r.Log.Info("running step updateStatus") - databaseNodeSet := ydbv1alpha1.DatabaseNodeSet{} - err := r.Client.Get(ctx, types.NamespacedName{ + crRemoteDatabaseNodeSet := &ydbv1alpha1.RemoteDatabaseNodeSet{} + err := r.RemoteClient.Get(ctx, types.NamespacedName{ + Namespace: remoteDatabaseNodeSet.Namespace, + Name: remoteDatabaseNodeSet.Name, + }, crRemoteDatabaseNodeSet) + if err != nil { + r.Recorder.Event( + crRemoteDatabaseNodeSet, + corev1.EventTypeWarning, + "ControllerError", + "Failed fetching RemoteDatabaseNodeSet on remote cluster before status update", + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + databaseNodeSet := &ydbv1alpha1.DatabaseNodeSet{} + err = r.Client.Get(ctx, 
types.NamespacedName{ Name: remoteDatabaseNodeSet.Name, Namespace: remoteDatabaseNodeSet.Namespace, - }, &databaseNodeSet) + }, databaseNodeSet) if err != nil { - if errors.IsNotFound(err) { - r.Recorder.Event( - remoteDatabaseNodeSet, - corev1.EventTypeWarning, - "ProvisioningFailed", - fmt.Sprintf("DatabaseNodeSet with name %s was not found: %s", remoteDatabaseNodeSet.Name, err), - ) - return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil - } r.Recorder.Event( - remoteDatabaseNodeSet, + crRemoteDatabaseNodeSet, corev1.EventTypeWarning, "ControllerError", - fmt.Sprintf("Failed to get DatabaseNodeSet: %s", err), + "Failed fetching DatabaseNodeSet before status update", ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } - oldStatus := remoteDatabaseNodeSet.Status.State - remoteDatabaseNodeSet.Status.State = databaseNodeSet.Status.State - remoteDatabaseNodeSet.Status.Conditions = databaseNodeSet.Status.Conditions + oldStatus := crRemoteDatabaseNodeSet.Status.State + crRemoteDatabaseNodeSet.Status.State = databaseNodeSet.Status.State + crRemoteDatabaseNodeSet.Status.Conditions = databaseNodeSet.Status.Conditions - err = r.RemoteClient.Status().Update(ctx, remoteDatabaseNodeSet) + err = r.RemoteClient.Status().Update(ctx, crRemoteDatabaseNodeSet) if err != nil { r.Recorder.Event( - remoteDatabaseNodeSet, + crRemoteDatabaseNodeSet, corev1.EventTypeWarning, "ControllerError", fmt.Sprintf("Failed setting status on remote cluster: %s", err), ) r.RemoteRecorder.Event( - remoteDatabaseNodeSet, + crRemoteDatabaseNodeSet, corev1.EventTypeWarning, "ControllerError", fmt.Sprintf("Failed setting status: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err - } else if oldStatus != databaseNodeSet.Status.State { + } else if oldStatus != crRemoteDatabaseNodeSet.Status.State { r.Recorder.Event( - remoteDatabaseNodeSet, + crRemoteDatabaseNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("RemoteDatabaseNodeSet moved 
from %s to %s on remote cluster", oldStatus, remoteDatabaseNodeSet.Status.State), + fmt.Sprintf("DatabaseNodeSet moved from %s to %s on remote cluster", oldStatus, crRemoteDatabaseNodeSet.Status.State), ) r.RemoteRecorder.Event( - remoteDatabaseNodeSet, + crRemoteDatabaseNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("RemoteDatabaseNodeSet moved from %s to %s", oldStatus, remoteDatabaseNodeSet.Status.State), + fmt.Sprintf("DatabaseNodeSet moved from %s to %s", oldStatus, crRemoteDatabaseNodeSet.Status.State), ) } diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index b322ad25..fc536e95 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -4,15 +4,20 @@ import ( "context" "fmt" "path/filepath" - "runtime" + "strings" "testing" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" "k8s.io/kubectl/pkg/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -45,7 +50,7 @@ var ( cancel context.CancelFunc ) -func TestRemoteNodeSetApis(t *testing.T) { +func TestRemoteStorageNodeSetApis(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "RemoteStorageNodeSet controller tests") @@ -58,16 +63,15 @@ var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - _, curfile, _, _ := runtime.Caller(0) //nolint:dogsled localEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + 
filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds"), }, ErrorIfCRDPathMissing: true, } remoteEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join(curfile, filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds")), + filepath.Join("..", "..", "..", "deploy", "ydb-operator", "crds"), }, ErrorIfCRDPathMissing: true, } @@ -173,6 +177,24 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { var storageSample *api.Storage BeforeEach(func() { + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) + storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ + Name: testNodeSetName + "-local", + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 4, + }, + }) + storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ + Name: testNodeSetName + "-remote", + Remote: &api.RemoteSpec{ + Cluster: testRemoteCluster, + }, + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 4, + }, + }) + + By("issuing create Namespace commands...") localNamespace = corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: testobjects.YdbNamespace, @@ -185,34 +207,30 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { } Expect(localClient.Create(ctx, &localNamespace)).Should(Succeed()) Expect(remoteClient.Create(ctx, &remoteNamespace)).Should(Succeed()) + + By("issuing create Storage commands...") + Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + By("checking that Storage created on local cluster...") + foundStorage := api.Storage{} + Eventually(func() bool { + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundStorage)) + return foundStorage.Status.State == StorageProvisioning + }, test.Timeout, test.Interval).Should(BeTrue()) + By("set status Ready to Storage...") + 
foundStorage.Status.State = StorageReady + Expect(localClient.Status().Update(ctx, &foundStorage)).Should(Succeed()) }) AfterEach(func() { - Expect(localClient.Delete(ctx, &localNamespace)).Should(Succeed()) - Expect(remoteClient.Delete(ctx, &remoteNamespace)).Should(Succeed()) + deleteAll(localEnv, localClient, &localNamespace) + deleteAll(remoteEnv, remoteClient, &localNamespace) }) When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { It("Should create StorageNodeSet and sync resources in k8s-data-cluster", func() { - By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) - storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ - Name: testNodeSetName + "-local", - StorageNodeSpec: api.StorageNodeSpec{ - Nodes: 4, - }, - }) - storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ - Name: testNodeSetName + "-remote", - Remote: &api.RemoteSpec{ - Cluster: testRemoteCluster, - }, - StorageNodeSpec: api.StorageNodeSpec{ - Nodes: 4, - }, - }) - Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) - By("checking that StorageNodeSet created on local cluster...") Eventually(func() bool { foundStorageNodeSet := api.StorageNodeSetList{} @@ -272,10 +290,11 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { foundStorageNodeSetOnRemote.Status.Conditions = append( foundStorageNodeSetOnRemote.Status.Conditions, metav1.Condition{ - Type: StorageNodeSetReadyCondition, - Status: "True", - Reason: ReasonCompleted, - Message: fmt.Sprintf("Scaled StorageNodeSet to %d successfully", foundStorageNodeSetOnRemote.Spec.Nodes), + Type: StorageNodeSetReadyCondition, + Status: "True", + Reason: ReasonCompleted, + LastTransitionTime: metav1.NewTime(time.Now()), + Message: fmt.Sprintf("Scaled StorageNodeSet to %d successfully", 
foundStorageNodeSetOnRemote.Spec.Nodes), }, ) Expect(remoteClient.Status().Update(ctx, &foundStorageNodeSetOnRemote)).Should(Succeed()) @@ -298,28 +317,31 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { }) When("Delete Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { It("Should delete all resources in k8s-data-cluster", func() { - By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-block-4-2-config.yaml")) - storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, api.StorageNodeSetSpecInline{ - Name: testNodeSetName + "-remote", - Remote: &api.RemoteSpec{ - Cluster: testRemoteCluster, - }, - StorageNodeSpec: api.StorageNodeSpec{ - Nodes: 8, - }, - }) - Expect(localClient.Create(ctx, storageSample)).Should(Succeed()) + By("checking that RemoteStorageNodeSet created on local cluster...") + Eventually(func() bool { + foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} + + Expect(localClient.List(ctx, &foundRemoteStorageNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) + + for _, nodeset := range foundRemoteStorageNodeSet.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true + } + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) By("checking that StorageNodeSet created on remote cluster...") Eventually(func() bool { - foundStorageNodeSetOnRemote := api.StorageNodeSetList{} + foundStorageNodeSet := api.StorageNodeSetList{} - Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( + Expect(remoteClient.List(ctx, &foundStorageNodeSet, client.InNamespace( testobjects.YdbNamespace, ))).Should(Succeed()) - for _, nodeset := range foundStorageNodeSetOnRemote.Items { + for _, nodeset := range foundStorageNodeSet.Items { if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { return true } @@ -327,36 
+349,141 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { return false }, test.Timeout, test.Interval).Should(BeTrue()) - By("Delete Storage on local cluster...") - foundStorage := api.Storage{} - Expect(localClient.Get(ctx, types.NamespacedName{ - Name: storageSample.Name, - Namespace: testobjects.YdbNamespace, - }, &foundStorage)).Should(Succeed()) - - Expect(localClient.Delete(ctx, &foundStorage)) + By("delete RemoteStorageNodeSet on local cluster...") + Eventually(func() error { + foundStorage := api.Storage{} + Expect(localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &foundStorage)).Should(Succeed()) + foundStorage.Spec.NodeSets = []api.StorageNodeSetSpecInline{ + { + Name: testNodeSetName + "-local", + StorageNodeSpec: api.StorageNodeSpec{ + Nodes: 4, + }, + }, + } + return localClient.Update(ctx, &foundStorage) + }, test.Timeout, test.Interval).Should(Succeed()) - By("checking that StorageNodeSets deleted from remote cluster...") + By("checking that StorageNodeSet deleted from remote cluster...") Eventually(func() bool { - foundStorageNodeSetOnRemote := api.StorageNodeSetList{} + foundStorageNodeSetOnRemote := api.StorageNodeSet{} - Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + err := remoteClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundStorageNodeSetOnRemote) - return len(foundStorageNodeSetOnRemote.Items) == 0 + return apierrors.IsNotFound(err) }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that Storage deleted from local cluster...") + By("checking that RemoteStorageNodeSet deleted from local cluster...") Eventually(func() bool { - foundStorages := api.StorageList{} + foundRemoteStorageNodeSet := api.RemoteStorageNodeSet{} - Expect(remoteClient.List(ctx, 
&foundStorages, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + err := localClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name + "-" + testNodeSetName + "-remote", + Namespace: testobjects.YdbNamespace, + }, &foundRemoteStorageNodeSet) - return len(foundStorages.Items) == 0 + return apierrors.IsNotFound(err) }, test.Timeout, test.Interval).Should(BeTrue()) }) }) }) + +func deleteAll(env *envtest.Environment, k8sClient client.Client, objs ...client.Object) { + for _, obj := range objs { + ctx := context.Background() + clientGo, err := kubernetes.NewForConfig(env.Config) + Expect(err).ShouldNot(HaveOccurred()) + Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, obj))).Should(Succeed()) + + if ns, ok := obj.(*corev1.Namespace); ok { + // Normally the kube-controller-manager would handle finalization + // and garbage collection of namespaces, but with envtest, we aren't + // running a kube-controller-manager. Instead we're gonna approximate + // (poorly) the kube-controller-manager by explicitly deleting some + // resources within the namespace and then removing the `kubernetes` + // finalizer from the namespace resource so it can finish deleting. + // Note that any resources within the namespace that we don't + // successfully delete could reappear if the namespace is ever + // recreated with the same name. 
+ + // Look up all namespaced resources under the discovery API + _, apiResources, err := clientGo.Discovery().ServerGroupsAndResources() + Expect(err).ShouldNot(HaveOccurred()) + namespacedGVKs := make(map[string]schema.GroupVersionKind) + for _, apiResourceList := range apiResources { + defaultGV, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + Expect(err).ShouldNot(HaveOccurred()) + for _, r := range apiResourceList.APIResources { + if !r.Namespaced || strings.Contains(r.Name, "/") { + // skip non-namespaced and subresources + continue + } + gvk := schema.GroupVersionKind{ + Group: defaultGV.Group, + Version: defaultGV.Version, + Kind: r.Kind, + } + if r.Group != "" { + gvk.Group = r.Group + } + if r.Version != "" { + gvk.Version = r.Version + } + namespacedGVKs[gvk.String()] = gvk + } + } + + // Delete all namespaced resources in this namespace + for _, gvk := range namespacedGVKs { + var u unstructured.Unstructured + u.SetGroupVersionKind(gvk) + err := k8sClient.DeleteAllOf(ctx, &u, client.InNamespace(ns.Name)) + Expect(client.IgnoreNotFound(ignoreMethodNotAllowed(err))).ShouldNot(HaveOccurred()) + } + + Eventually(func() error { + key := client.ObjectKeyFromObject(ns) + if err := k8sClient.Get(ctx, key, ns); err != nil { + return client.IgnoreNotFound(err) + } + // remove `kubernetes` finalizer + const kubernetes = "kubernetes" + finalizers := []corev1.FinalizerName{} + for _, f := range ns.Spec.Finalizers { + if f != kubernetes { + finalizers = append(finalizers, f) + } + } + ns.Spec.Finalizers = finalizers + + // We have to use the k8s.io/client-go library here to expose + // ability to patch the /finalize subresource on the namespace + _, err = clientGo.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) + return err + }, test.Timeout, test.Interval).Should(Succeed()) + } + + Eventually(func() metav1.StatusReason { + key := client.ObjectKeyFromObject(obj) + if err := k8sClient.Get(ctx, key, obj); err != nil { + return 
apierrors.ReasonForError(err) + } + return "" + }, test.Timeout, test.Interval).Should(Equal(metav1.StatusReasonNotFound)) + } +} + +func ignoreMethodNotAllowed(err error) error { + if err != nil { + if apierrors.ReasonForError(err) == metav1.StatusReasonMethodNotAllowed { + return nil + } + } + return err +} diff --git a/internal/controllers/remotestoragenodeset/sync.go b/internal/controllers/remotestoragenodeset/sync.go index 95e4db57..1d425436 100644 --- a/internal/controllers/remotestoragenodeset/sync.go +++ b/internal/controllers/remotestoragenodeset/sync.go @@ -6,7 +6,6 @@ import ( "reflect" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -101,61 +100,67 @@ func (r *Reconciler) updateStatus( ) (bool, ctrl.Result, error) { r.Log.Info("running step updateStatus") - storageNodeSet := ydbv1alpha1.StorageNodeSet{} - err := r.Client.Get(ctx, types.NamespacedName{ + crRemoteStorageNodeSet := &ydbv1alpha1.RemoteStorageNodeSet{} + err := r.RemoteClient.Get(ctx, types.NamespacedName{ + Namespace: remoteStorageNodeSet.Namespace, + Name: remoteStorageNodeSet.Name, + }, crRemoteStorageNodeSet) + if err != nil { + r.Recorder.Event( + crRemoteStorageNodeSet, + corev1.EventTypeWarning, + "ControllerError", + "Failed fetching RemoteStorageNodeSet on remote cluster before status update", + ) + return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err + } + + storageNodeSet := &ydbv1alpha1.StorageNodeSet{} + err = r.Client.Get(ctx, types.NamespacedName{ Name: remoteStorageNodeSet.Name, Namespace: remoteStorageNodeSet.Namespace, - }, &storageNodeSet) + }, storageNodeSet) if err != nil { - if errors.IsNotFound(err) { - r.Recorder.Event( - remoteStorageNodeSet, - corev1.EventTypeWarning, - "ProvisioningFailed", - fmt.Sprintf("StorageNodeSet with name %s was not found: %s", remoteStorageNodeSet.Name, err), - ) - return Stop, ctrl.Result{RequeueAfter: 
DefaultRequeueDelay}, nil - } r.Recorder.Event( - remoteStorageNodeSet, + crRemoteStorageNodeSet, corev1.EventTypeWarning, "ControllerError", - fmt.Sprintf("Failed to get StorageNodeSet: %s", err), + "Failed fetching StorageNodeSet before status update", ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err } - oldStatus := remoteStorageNodeSet.Status.State - remoteStorageNodeSet.Status.State = storageNodeSet.Status.State - remoteStorageNodeSet.Status.Conditions = storageNodeSet.Status.Conditions + oldStatus := crRemoteStorageNodeSet.Status.State + crRemoteStorageNodeSet.Status.State = storageNodeSet.Status.State + crRemoteStorageNodeSet.Status.Conditions = storageNodeSet.Status.Conditions - err = r.RemoteClient.Status().Update(ctx, remoteStorageNodeSet) + err = r.RemoteClient.Status().Update(ctx, crRemoteStorageNodeSet) if err != nil { r.Recorder.Event( - remoteStorageNodeSet, + crRemoteStorageNodeSet, corev1.EventTypeWarning, "ControllerError", fmt.Sprintf("Failed setting status on remote cluster: %s", err), ) r.RemoteRecorder.Event( - remoteStorageNodeSet, + crRemoteStorageNodeSet, corev1.EventTypeWarning, "ControllerError", fmt.Sprintf("Failed setting status: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err - } else if oldStatus != storageNodeSet.Status.State { + } else if oldStatus != crRemoteStorageNodeSet.Status.State { r.Recorder.Event( - remoteStorageNodeSet, + crRemoteStorageNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s on remote cluster", oldStatus, remoteStorageNodeSet.Status.State), + fmt.Sprintf("StorageNodeSet moved from %s to %s on remote cluster", oldStatus, crRemoteStorageNodeSet.Status.State), ) r.RemoteRecorder.Event( - remoteStorageNodeSet, + crRemoteStorageNodeSet, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("RemoteStorageNodeSet moved from %s to %s", oldStatus, remoteStorageNodeSet.Status.State), + fmt.Sprintf("StorageNodeSet moved 
from %s to %s", oldStatus, crRemoteStorageNodeSet.Status.State), ) } diff --git a/internal/controllers/storage/sync.go b/internal/controllers/storage/sync.go index 57c185d2..fcc1c0b4 100644 --- a/internal/controllers/storage/sync.go +++ b/internal/controllers/storage/sync.go @@ -469,12 +469,12 @@ func (r *Reconciler) setState( fmt.Sprintf("Failed setting status: %s", err), ) return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err - } else if oldStatus != storage.Status.State { + } else if oldStatus != storageCr.Status.State { r.Recorder.Event( storageCr, corev1.EventTypeNormal, "StatusChanged", - fmt.Sprintf("Storage moved from %s to %s", oldStatus, storage.Status.State), + fmt.Sprintf("Storage moved from %s to %s", oldStatus, storageCr.Status.State), ) } diff --git a/internal/resources/database.go b/internal/resources/database.go index 1d5cd0ac..f0c252ba 100644 --- a/internal/resources/database.go +++ b/internal/resources/database.go @@ -206,7 +206,6 @@ func (b *DatabaseBuilder) getNodeSetBuilders(databaseLabels labels.Labels) []Res nodeSetLabels = nodeSetLabels.Merge(map[string]string{labels.DatabaseNodeSetComponent: nodeSetSpecInline.Name}) databaseNodeSetSpec := b.recastDatabaseNodeSetSpecInline(nodeSetSpecInline.DeepCopy()) - if nodeSetSpecInline.Remote != nil { nodeSetLabels = nodeSetLabels.Merge(map[string]string{ labels.RemoteClusterKey: nodeSetSpecInline.Remote.Cluster, @@ -245,7 +244,7 @@ func (b *DatabaseBuilder) recastDatabaseNodeSetSpecInline(nodeSetSpecInline *api nodeSetSpec.DatabaseRef = api.NamespacedRef{ Name: b.Name, - Namespace: b.GetNamespace(), + Namespace: b.Namespace, } nodeSetSpec.DatabaseClusterSpec = b.Spec.DatabaseClusterSpec diff --git a/internal/resources/remotedatabasenodeset.go b/internal/resources/remotedatabasenodeset.go index 954cf02d..12a01b49 100644 --- a/internal/resources/remotedatabasenodeset.go +++ b/internal/resources/remotedatabasenodeset.go @@ -40,7 +40,7 @@ func (b *RemoteDatabaseNodeSetBuilder) Build(obj 
client.Object) error { } func (b *RemoteDatabaseNodeSetBuilder) Placeholder(cr client.Object) client.Object { - return &api.DatabaseNodeSet{ + return &api.RemoteDatabaseNodeSet{ ObjectMeta: metav1.ObjectMeta{ Name: b.Name, Namespace: cr.GetNamespace(), From fb28a3349de985074ec078b367f73505771e91f4 Mon Sep 17 00:00:00 2001 From: Aleksei Kobzev Date: Tue, 13 Feb 2024 19:19:09 +0300 Subject: [PATCH 24/24] using cluster cache instead of manager --- cmd/ydb-kubernetes-operator/main.go | 10 +- .../remotedatabasenodeset/controller.go | 24 +++- .../remotedatabasenodeset/controller_test.go | 5 +- .../controllers/remotedatabasenodeset/sync.go | 25 +--- .../remotestoragenodeset/controller.go | 29 +++- .../remotestoragenodeset/controller_test.go | 127 +++++++----------- .../controllers/remotestoragenodeset/sync.go | 25 +--- 7 files changed, 108 insertions(+), 137 deletions(-) diff --git a/cmd/ydb-kubernetes-operator/main.go b/cmd/ydb-kubernetes-operator/main.go index d85f720a..c6fb9c40 100644 --- a/cmd/ydb-kubernetes-operator/main.go +++ b/cmd/ydb-kubernetes-operator/main.go @@ -193,18 +193,16 @@ func main() { } if err = (&remotestoragenodeset.Reconciler{ - Client: mgr.GetClient(), - RemoteClient: remoteCluster.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr, &remoteCluster); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RemoteStorageNodeSet") os.Exit(1) } if err = (&remotedatabasenodeset.Reconciler{ - Client: mgr.GetClient(), - RemoteClient: remoteCluster.GetClient(), - Scheme: mgr.GetScheme(), + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), }).SetupWithManager(mgr, &remoteCluster); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RemoteDatabaseNodeSet") os.Exit(1) diff --git a/internal/controllers/remotedatabasenodeset/controller.go b/internal/controllers/remotedatabasenodeset/controller.go index a5199c04..abbc590d 100644 --- 
a/internal/controllers/remotedatabasenodeset/controller.go +++ b/internal/controllers/remotedatabasenodeset/controller.go @@ -11,16 +11,20 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/annotations" . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck ydblabels "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" ) @@ -56,7 +60,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // on deleted requests. 
if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteDatabaseNodeSet); err != nil { if apierrors.IsNotFound(err) { - logger.Info("RemoteDatabaseNodeSet has been deleted on remote cluster") + logger.Info("RemoteDatabaseNodeSet resource not found on remote cluster") return ctrl.Result{Requeue: false}, nil } logger.Error(err, "unable to get RemoteDatabaseNodeSet on remote cluster") @@ -125,6 +129,21 @@ func (r *Reconciler) deleteExternalResources(ctx context.Context, key types.Name return nil } +func ignoreDeletionPredicate() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + generationChanged := e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + annotationsChanged := !annotations.CompareYdbTechAnnotations(e.ObjectOld.GetAnnotations(), e.ObjectNew.GetAnnotations()) + + return generationChanged || annotationsChanged + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + } +} + // SetupWithManager sets up the controller with the Manager. func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { cluster := *remoteCluster @@ -137,6 +156,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) r.RemoteRecorder = cluster.GetEventRecorderFor(resourceGVK.Kind) + r.RemoteClient = cluster.GetClient() annotationFilter := func(mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) @@ -158,7 +178,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C return ctrl.NewControllerManagedBy(mgr). Named(resourceGVK.Kind). - Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}). 
+ Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}, builder.WithPredicates(ignoreDeletionPredicate())). Watches(&source.Kind{Type: &api.DatabaseNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). Complete(r) } diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go index 3cf49803..63ff1dc2 100644 --- a/internal/controllers/remotedatabasenodeset/controller_test.go +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -149,9 +149,8 @@ var _ = BeforeSuite(func() { Expect(err).ShouldNot(HaveOccurred()) err = (&remotedatabasenodeset.Reconciler{ - Client: remoteManager.GetClient(), - RemoteClient: localManager.GetClient(), - Scheme: remoteManager.GetScheme(), + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), }).SetupWithManager(remoteManager, &remoteCluster) Expect(err).ShouldNot(HaveOccurred()) diff --git a/internal/controllers/remotedatabasenodeset/sync.go b/internal/controllers/remotedatabasenodeset/sync.go index 68797e4e..8d3d4a93 100644 --- a/internal/controllers/remotedatabasenodeset/sync.go +++ b/internal/controllers/remotedatabasenodeset/sync.go @@ -27,7 +27,7 @@ func (r *Reconciler) Sync(ctx context.Context, crRemoteDatabaseNodeSet *ydbv1alp return result, err } - stop, result, err = r.updateStatus(ctx, &remoteDatabaseNodeSet) + stop, result, err = r.updateStatus(ctx, crRemoteDatabaseNodeSet) if stop { return result, err } @@ -96,29 +96,14 @@ func (r *Reconciler) handleResourcesSync( func (r *Reconciler) updateStatus( ctx context.Context, - remoteDatabaseNodeSet *resources.RemoteDatabaseNodeSetResource, + crRemoteDatabaseNodeSet *ydbv1alpha1.RemoteDatabaseNodeSet, ) (bool, ctrl.Result, error) { r.Log.Info("running step updateStatus") - crRemoteDatabaseNodeSet := &ydbv1alpha1.RemoteDatabaseNodeSet{} - err := r.RemoteClient.Get(ctx, types.NamespacedName{ - Namespace: 
remoteDatabaseNodeSet.Namespace, - Name: remoteDatabaseNodeSet.Name, - }, crRemoteDatabaseNodeSet) - if err != nil { - r.Recorder.Event( - crRemoteDatabaseNodeSet, - corev1.EventTypeWarning, - "ControllerError", - "Failed fetching RemoteDatabaseNodeSet on remote cluster before status update", - ) - return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err - } - databaseNodeSet := &ydbv1alpha1.DatabaseNodeSet{} - err = r.Client.Get(ctx, types.NamespacedName{ - Name: remoteDatabaseNodeSet.Name, - Namespace: remoteDatabaseNodeSet.Namespace, + err := r.Client.Get(ctx, types.NamespacedName{ + Name: crRemoteDatabaseNodeSet.Name, + Namespace: crRemoteDatabaseNodeSet.Namespace, }, databaseNodeSet) if err != nil { r.Recorder.Event( diff --git a/internal/controllers/remotestoragenodeset/controller.go b/internal/controllers/remotestoragenodeset/controller.go index 9fca2d10..6f0edad9 100644 --- a/internal/controllers/remotestoragenodeset/controller.go +++ b/internal/controllers/remotestoragenodeset/controller.go @@ -11,16 +11,20 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/cluster" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + "github.com/ydb-platform/ydb-kubernetes-operator/internal/annotations" . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck ydblabels "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" ) @@ -51,15 +55,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu logger := log.FromContext(ctx) remoteStorageNodeSet := &api.RemoteStorageNodeSet{} - // we'll ignore not-found errors, since they can't be fixed by an immediate - // requeue (we'll need to wait for a new notification), and we can get them - // on deleted requests. if err := r.RemoteClient.Get(ctx, req.NamespacedName, remoteStorageNodeSet); err != nil { if apierrors.IsNotFound(err) { - logger.Info("StorageNodeSet has been deleted") + logger.Info("RemoteStorageNodeSet resource not found") return ctrl.Result{Requeue: false}, nil } - logger.Error(err, "unable to get RemoteStorageNodeSet") + logger.Error(err, "unable to get RemoteStorageNodeSet on remote cluster") return ctrl.Result{RequeueAfter: DefaultRequeueDelay}, nil } @@ -128,6 +129,21 @@ func (r *Reconciler) deleteExternalResources(ctx context.Context, remoteStorageN return nil } +func ignoreDeletionPredicate() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + generationChanged := e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + annotationsChanged := !annotations.CompareYdbTechAnnotations(e.ObjectOld.GetAnnotations(), e.ObjectNew.GetAnnotations()) + + return generationChanged || annotationsChanged + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + } +} + // SetupWithManager sets up the controller with the Manager. 
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.Cluster) error { cluster := *remoteCluster @@ -140,6 +156,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C r.Recorder = mgr.GetEventRecorderFor(resourceGVK.Kind) r.RemoteRecorder = cluster.GetEventRecorderFor(resourceGVK.Kind) + r.RemoteClient = cluster.GetClient() annotationFilter := func(mapObj client.Object) []reconcile.Request { requests := make([]reconcile.Request, 0) @@ -161,7 +178,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, remoteCluster *cluster.C return ctrl.NewControllerManagedBy(mgr). Named(resourceGVK.Kind). - Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}). + Watches(source.NewKindWithCache(resource, cluster.GetCache()), &handler.EnqueueRequestForObject{}, builder.WithPredicates(ignoreDeletionPredicate())). Watches(&source.Kind{Type: &api.StorageNodeSet{}}, handler.EnqueueRequestsFromMapFunc(annotationFilter)). 
Complete(r) } diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index fc536e95..1abbda10 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -138,9 +138,8 @@ var _ = BeforeSuite(func() { Expect(err).ShouldNot(HaveOccurred()) err = (&remotestoragenodeset.Reconciler{ - Client: remoteManager.GetClient(), - RemoteClient: localManager.GetClient(), - Scheme: remoteManager.GetScheme(), + Client: remoteManager.GetClient(), + Scheme: remoteManager.GetScheme(), }).SetupWithManager(remoteManager, &remoteCluster) Expect(err).ShouldNot(HaveOccurred()) @@ -222,64 +221,64 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { By("set status Ready to Storage...") foundStorage.Status.State = StorageReady Expect(localClient.Status().Update(ctx, &foundStorage)).Should(Succeed()) - }) - AfterEach(func() { - deleteAll(localEnv, localClient, &localNamespace) - deleteAll(remoteEnv, remoteClient, &localNamespace) - }) - - When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { - It("Should create StorageNodeSet and sync resources in k8s-data-cluster", func() { - By("checking that StorageNodeSet created on local cluster...") - Eventually(func() bool { - foundStorageNodeSet := api.StorageNodeSetList{} + By("checking that StorageNodeSet created on local cluster...") + Eventually(func() bool { + foundStorageNodeSet := api.StorageNodeSetList{} - Expect(localClient.List(ctx, &foundStorageNodeSet, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + Expect(localClient.List(ctx, &foundStorageNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) - for _, nodeset := range foundStorageNodeSet.Items { - if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-local" { - return true - } + for _, nodeset := range foundStorageNodeSet.Items { 
+ if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-local" { + return true } - return false - }, test.Timeout, test.Interval).Should(BeTrue()) + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that RemoteStorageNodeSet created on local cluster...") - Eventually(func() bool { - foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} + By("checking that RemoteStorageNodeSet created on local cluster...") + Eventually(func() bool { + foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} - Expect(localClient.List(ctx, &foundRemoteStorageNodeSet, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + Expect(localClient.List(ctx, &foundRemoteStorageNodeSet, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) - for _, nodeset := range foundRemoteStorageNodeSet.Items { - if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { - return true - } + for _, nodeset := range foundRemoteStorageNodeSet.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true } - return false - }, test.Timeout, test.Interval).Should(BeTrue()) + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) - By("checking that StorageNodeSet created on remote cluster...") - Eventually(func() bool { - foundStorageNodeSetOnRemote := api.StorageNodeSetList{} + By("checking that StorageNodeSet created on remote cluster...") + Eventually(func() bool { + foundStorageNodeSetOnRemote := api.StorageNodeSetList{} - Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) + Expect(remoteClient.List(ctx, &foundStorageNodeSetOnRemote, client.InNamespace( + testobjects.YdbNamespace, + ))).Should(Succeed()) - for _, nodeset := range foundStorageNodeSetOnRemote.Items { - if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { - return true - } + for _, nodeset := range 
foundStorageNodeSetOnRemote.Items { + if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { + return true } - return false - }, test.Timeout, test.Interval).Should(BeTrue()) + } + return false + }, test.Timeout, test.Interval).Should(BeTrue()) + }) - By("Set StorageNodeSet status to Ready on remote cluster...") + AfterEach(func() { + deleteAll(localEnv, localClient, &localNamespace) + deleteAll(remoteEnv, remoteClient, &localNamespace) + }) + + When("Create Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { + It("Should create StorageNodeSet and sync resources in k8s-data-cluster", func() { + By("set StorageNodeSet status to Ready on remote cluster...") foundStorageNodeSetOnRemote := api.StorageNodeSet{} Expect(remoteClient.Get(ctx, types.NamespacedName{ Name: storageSample.Name + "-" + testNodeSetName + "-remote", @@ -317,38 +316,6 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { }) When("Delete Storage with RemoteStorageNodeSet in k8s-mgmt-cluster", func() { It("Should delete all resources in k8s-data-cluster", func() { - By("checking that RemoteStorageNodeSet created on local cluster...") - Eventually(func() bool { - foundRemoteStorageNodeSet := api.RemoteStorageNodeSetList{} - - Expect(localClient.List(ctx, &foundRemoteStorageNodeSet, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) - - for _, nodeset := range foundRemoteStorageNodeSet.Items { - if nodeset.Name == storageSample.Name+"-"+testNodeSetName+"-remote" { - return true - } - } - return false - }, test.Timeout, test.Interval).Should(BeTrue()) - - By("checking that StorageNodeSet created on remote cluster...") - Eventually(func() bool { - foundStorageNodeSet := api.StorageNodeSetList{} - - Expect(remoteClient.List(ctx, &foundStorageNodeSet, client.InNamespace( - testobjects.YdbNamespace, - ))).Should(Succeed()) - - for _, nodeset := range foundStorageNodeSet.Items { - if nodeset.Name == 
storageSample.Name+"-"+testNodeSetName+"-remote" { - return true - } - } - return false - }, test.Timeout, test.Interval).Should(BeTrue()) - By("delete RemoteStorageNodeSet on local cluster...") Eventually(func() error { foundStorage := api.Storage{} diff --git a/internal/controllers/remotestoragenodeset/sync.go b/internal/controllers/remotestoragenodeset/sync.go index 1d425436..1ce2f321 100644 --- a/internal/controllers/remotestoragenodeset/sync.go +++ b/internal/controllers/remotestoragenodeset/sync.go @@ -27,7 +27,7 @@ func (r *Reconciler) Sync(ctx context.Context, crRemoteStorageNodeSet *ydbv1alph return result, err } - stop, result, err = r.updateStatus(ctx, &remoteStorageNodeSet) + stop, result, err = r.updateStatus(ctx, crRemoteStorageNodeSet) if stop { return result, err } @@ -96,29 +96,14 @@ func (r *Reconciler) handleResourcesSync( func (r *Reconciler) updateStatus( ctx context.Context, - remoteStorageNodeSet *resources.RemoteStorageNodeSetResource, + crRemoteStorageNodeSet *ydbv1alpha1.RemoteStorageNodeSet, ) (bool, ctrl.Result, error) { r.Log.Info("running step updateStatus") - crRemoteStorageNodeSet := &ydbv1alpha1.RemoteStorageNodeSet{} - err := r.RemoteClient.Get(ctx, types.NamespacedName{ - Namespace: remoteStorageNodeSet.Namespace, - Name: remoteStorageNodeSet.Name, - }, crRemoteStorageNodeSet) - if err != nil { - r.Recorder.Event( - crRemoteStorageNodeSet, - corev1.EventTypeWarning, - "ControllerError", - "Failed fetching RemoteStorageNodeSet on remote cluster before status update", - ) - return Stop, ctrl.Result{RequeueAfter: DefaultRequeueDelay}, err - } - storageNodeSet := &ydbv1alpha1.StorageNodeSet{} - err = r.Client.Get(ctx, types.NamespacedName{ - Name: remoteStorageNodeSet.Name, - Namespace: remoteStorageNodeSet.Namespace, + err := r.Client.Get(ctx, types.NamespacedName{ + Name: crRemoteStorageNodeSet.Name, + Namespace: crRemoteStorageNodeSet.Namespace, }, storageNodeSet) if err != nil { r.Recorder.Event(