new attempt

Bastian Schnorbus
2025-05-06 23:03:51 +02:00
parent 7783d14826
commit cbefdfba7b
28 changed files with 80 additions and 446 deletions

View File

@@ -28,8 +28,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
-# rs/tdset-operator-bundle:$VERSION and rs/tdset-operator-catalog:$VERSION.
-IMAGE_TAG_BASE ?= rs/tdset-operator
+# rs/k8s-operator-playground-bundle:$VERSION and rs/k8s-operator-playground-catalog:$VERSION.
+IMAGE_TAG_BASE ?= rs/k8s-operator-playground

# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
@@ -48,7 +48,7 @@ endif
# Set the Operator SDK version to use. By default, what is installed on the system is used.
# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit.
-OPERATOR_SDK_VERSION ?= v1.39.1
+OPERATOR_SDK_VERSION ?= v1.39.2
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
@@ -159,10 +159,10 @@ PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
docker-buildx: ## Build and push docker image for the manager for cross-platform support
	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
-	- $(CONTAINER_TOOL) buildx create --name tdset-operator-builder
-	$(CONTAINER_TOOL) buildx use tdset-operator-builder
+	- $(CONTAINER_TOOL) buildx create --name k8s-operator-playground-builder
+	$(CONTAINER_TOOL) buildx use k8s-operator-playground-builder
	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
-	- $(CONTAINER_TOOL) buildx rm tdset-operator-builder
+	- $(CONTAINER_TOOL) buildx rm k8s-operator-playground-builder
	rm Dockerfile.cross

.PHONY: build-installer

View File

@@ -8,7 +8,7 @@ layout:
plugins:
  manifests.sdk.operatorframework.io/v2: {}
  scorecard.sdk.operatorframework.io/v2: {}
-projectName: tdset-operator
+projectName: k8s-operator-playground
repo: github.com/baschno/tdset-operator
resources:
- api:

View File

@@ -1,4 +1,4 @@
-# tdset-operator
+# k8s-operator-playground
// TODO(user): Add simple overview of use/purpose

## Description
@@ -16,7 +16,7 @@
**Build and push your image to the location specified by `IMG`:**

```sh
-make docker-build docker-push IMG=<some-registry>/tdset-operator:tag
+make docker-build docker-push IMG=<some-registry>/k8s-operator-playground:tag
```

**NOTE:** This image ought to be published in the personal registry you specified.
@@ -32,7 +32,7 @@ make install
**Deploy the Manager to the cluster with the image specified by `IMG`:**

```sh
-make deploy IMG=<some-registry>/tdset-operator:tag
+make deploy IMG=<some-registry>/k8s-operator-playground:tag
```

> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
@@ -73,7 +73,7 @@ Following are the steps to build the installer and distribute this project to users.
1. Build the installer for the image built and published in the registry:

```sh
-make build-installer IMG=<some-registry>/tdset-operator:tag
+make build-installer IMG=<some-registry>/k8s-operator-playground:tag
```

NOTE: The makefile target mentioned above generates an 'install.yaml'
@@ -86,7 +86,7 @@ its dependencies.
Users can just run kubectl apply -f <URL for YAML BUNDLE> to install the project, i.e.:

```sh
-kubectl apply -f https://raw.githubusercontent.com/<org>/tdset-operator/<tag or branch>/dist/install.yaml
+kubectl apply -f https://raw.githubusercontent.com/<org>/k8s-operator-playground/<tag or branch>/dist/install.yaml
```

## Contributing
@@ -112,3 +112,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+## What have I done?
+
+Source: https://shahin-mahmud.medium.com/write-your-first-kubernetes-operator-in-go-177047337eae
+
+1. Initialize project
+   `operator-sdk init --domain rs --repo github.com/baschno/tdset-operator --plugins=go/v4`
+2. Create CRDs, go types, controller, etc.
+   `operator-sdk create api --group schedule --version v1 --kind TDSet --resource --controller`
+   - `--group` -> resource group name
+   - `--controller` -> create controller w/o prompting
+   - `--resource` -> create resources w/o prompting
+3. Do changes to the go types
+4. Run `make generate`
+5. Run `make manifests`
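A note on step 3: as an illustrative sketch only (not part of this commit's diff), "changes to the go types" here means editing `api/v1/tdset_types.go` to add spec fields with JSON tags and kubebuilder validation markers, along the lines of the `Scheduling` window type touched later in this commit:

```go
// Sketch only — mirrors the fields shown in this commit's
// api/v1/tdset_types.go hunks; not copied verbatim from the repo.
type Scheduling struct {
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=23
	StartTime int `json:"startTime"`

	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=23
	EndTime int `json:"endTime"`

	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=6
	Replicas int `json:"replicas"`
}
```

`make generate` then refreshes the generated deepcopy code, and `make manifests` re-renders the CRD under `config/crd/bases/`, where the markers show up as the `minimum`/`maximum` constraints visible in the CRD hunks further down.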

View File

@@ -23,20 +23,18 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
-// Important: Run "make" to regenerate code after modifying this file
-// Container defines container related properties.
+// Container related properties
type Container struct {
	Image string `json:"image"`
	Port  int    `json:"port"`
}

-// Service defines service related properties.
+// Service related properties
type Service struct {
	Port int `json:"port"`
}

-// SchedulingConfig defines scheduling related properties.
+// Scheduling related properties
type Scheduling struct {
	// +kubebuilder:validation:Minimum=0
	// +kubebuilder:validation:Maximum=23
@@ -45,20 +43,28 @@ type Scheduling struct {
	// +kubebuilder:validation:Maximum=23
	EndTime int `json:"endTime"`

	// +kubebuilder:validation:Minimum=0
-	Replica int `json:"replica"`
+	// +kubebuilder:validation:Maximum=6
+	Replicas int `json:"replicas"`
}

// TDSetSpec defines the desired state of TDSet
type TDSetSpec struct {
-	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
-	// Important: Run "make" to regenerate code after modifying this file
	// +kubebuilder:validation:Required
	Container Container `json:"container"`

	// +kubebuilder:validation:Optional
	Service Service `json:"service,omitempty"`

	// +kubebuilder:validation:Required
	SchedulingConfig []*Scheduling `json:"schedulingConfig"`

	// +kubebuilder:validation:Required
-	// +kubebuilder:validation:Minimum=1
-	DefaultReplica int32 `json:"defaultReplica"`
+	// +kubebuilder:Minimum:1
+	DefaultReplicas int32 `json:"defaultReplicas,omitempty"`

	// +kubebuilder:validation:Optional
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=1440
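For orientation, a brief sketch of how the renamed fields are populated from Go code. The helper name and the literal values are made up for illustration; only the type and field names come from the hunks above, assuming the standard kubebuilder-scaffolded `TDSet` wrapper type.

```go
package v1_test

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	schedulev1 "github.com/baschno/tdset-operator/api/v1"
)

// newSampleTDSet is a hypothetical helper (not in the repo) showing the
// renamed fields (Replicas, DefaultReplicas); values are placeholders.
func newSampleTDSet() *schedulev1.TDSet {
	return &schedulev1.TDSet{
		ObjectMeta: metav1.ObjectMeta{Name: "tdset-sample", Namespace: "default"},
		Spec: schedulev1.TDSetSpec{
			Container: schedulev1.Container{Image: "nginx:latest", Port: 80},
			Service:   schedulev1.Service{Port: 80},
			SchedulingConfig: []*schedulev1.Scheduling{
				// scale to 3 replicas between 09:00 and 17:00 server time
				{StartTime: 9, EndTime: 17, Replicas: 3},
			},
			DefaultReplicas: 1, // used outside every scheduling window
		},
	}
}
```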

View File

@@ -36,6 +36,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook"
schedulev1 "github.com/baschno/tdset-operator/api/v1" schedulev1 "github.com/baschno/tdset-operator/api/v1"
"github.com/baschno/tdset-operator/internal/controller"
// +kubebuilder:scaffold:imports // +kubebuilder:scaffold:imports
) )

View File

@@ -40,7 +40,7 @@ spec:
            description: TDSetSpec defines the desired state of TDSet
            properties:
              container:
-               description: Container defines container related properties.
+               description: Container related properties
                properties:
                  image:
                    type: string
@@ -50,9 +50,8 @@ spec:
                - image
                - port
                type: object
-             defaultReplica:
+             defaultReplicas:
                format: int32
-               minimum: 1
                type: integer
              intervalMint:
                format: int32
@@ -61,13 +60,14 @@ spec:
                type: integer
              schedulingConfig:
                items:
-                 description: SchedulingConfig defines scheduling related properties.
+                 description: Scheduling related properties
                  properties:
                    endTime:
                      maximum: 23
                      minimum: 0
                      type: integer
-                   replica:
+                   replicas:
+                     maximum: 6
                      minimum: 0
                      type: integer
                    startTime:
@@ -76,12 +76,12 @@ spec:
                      type: integer
                  required:
                  - endTime
-                 - replica
+                 - replicas
                  - startTime
                  type: object
                type: array
              service:
-               description: Service defines service related properties.
+               description: Service related properties
                properties:
                  port:
                    type: integer
@@ -90,7 +90,7 @@ spec:
                type: object
            required:
            - container
-           - defaultReplica
+           - defaultReplicas
            - schedulingConfig
            type: object
          status:

View File

@@ -1,12 +1,12 @@
# Adds namespace to all resources.
-namespace: tdset-operator-system
+namespace: k8s-operator-playground-system

# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
-namePrefix: tdset-operator-
+namePrefix: k8s-operator-playground-

# Labels to add to all resources and selectors.
#labels:

View File

@@ -3,7 +3,7 @@ kind: Service
metadata:
  labels:
    control-plane: controller-manager
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-service
  namespace: system

View File

@@ -3,7 +3,7 @@ kind: Namespace
metadata:
  labels:
    control-plane: controller-manager
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: system
---
@@ -14,7 +14,7 @@ metadata:
  namespace: system
  labels:
    control-plane: controller-manager
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
spec:
  selector:

View File

@@ -1,7 +1,7 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
-- bases/tdset-operator.clusterserviceversion.yaml
+- bases/k8s-operator-playground.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard

View File

@@ -5,7 +5,7 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: allow-metrics-traffic
  namespace: system

View File

@@ -4,7 +4,7 @@ kind: ServiceMonitor
metadata:
  labels:
    control-plane: controller-manager
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager-metrics-monitor
  namespace: system

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-role
rules:

View File

@@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: leader-election-rolebinding
roleRef:

View File

@@ -4,18 +4,6 @@ kind: ClusterRole
metadata:
  name: manager-role
rules:
-- apiGroups:
-  - apps
-  resources:
-  - deployments
-  verbs:
-  - create
-  - delete
-  - get
-  - list
-  - patch
-  - update
-  - watch
- apiGroups:
  - schedule.rs
  resources:

View File

@@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: manager-rolebinding
roleRef:

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: controller-manager
  namespace: system

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: tdset-editor-role
rules:

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: tdset-viewer-role
rules:

View File

@@ -2,7 +2,7 @@ apiVersion: schedule.rs/v1
kind: TDSet
metadata:
  labels:
-    app.kubernetes.io/name: tdset-operator
+    app.kubernetes.io/name: k8s-operator-playground
    app.kubernetes.io/managed-by: kustomize
  name: tdset-sample
spec:

View File

@@ -4,7 +4,7 @@
    entrypoint:
    - scorecard-test
    - basic-check-spec
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: basic
      test: basic-check-spec-test

View File

@@ -4,7 +4,7 @@
    entrypoint:
    - scorecard-test
    - olm-bundle-validation
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: olm
      test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
    entrypoint:
    - scorecard-test
    - olm-crds-have-validation
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: olm
      test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
    entrypoint:
    - scorecard-test
    - olm-crds-have-resources
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: olm
      test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
    entrypoint:
    - scorecard-test
    - olm-spec-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: olm
      test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
    entrypoint:
    - scorecard-test
    - olm-status-descriptors
-    image: quay.io/operator-framework/scorecard-test:v1.39.1
+    image: quay.io/operator-framework/scorecard-test:v1.39.2
    labels:
      suite: olm
      test: olm-status-descriptors-test

View File

@@ -1,218 +0,0 @@
package controllers

import (
	"context"
	"fmt"

	schedulev1 "github.com/baschno/tdset-operator/api/v1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func (r *TDSetReconciler) Deployment(
	ctx context.Context, req ctrl.Request,
	tdSet *schedulev1.TDSet,
) (*appsv1.Deployment, error) {
	log := log.FromContext(ctx)

	replicas, err := r.GetExpectedReplica(ctx, req, tdSet)
	if err != nil {
		log.Error(err, "failed to get expected replica")
		return nil, err
	}

	labels := map[string]string{
		"app.kubernetes.io/name":       "TDSet",
		"app.kubernetes.io/instance":   tdSet.Name,
		"app.kubernetes.io/version":    "v1",
		"app.kubernetes.io/part-of":    "tdset-operator",
		"app.kubernetes.io/created-by": "controller-manager",
	}

	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      tdSet.Name,
			Namespace: tdSet.Namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Image:           tdSet.Spec.Container.Image,
						Name:            tdSet.Name,
						ImagePullPolicy: corev1.PullIfNotPresent,
						Ports: []corev1.ContainerPort{{
							ContainerPort: int32(tdSet.Spec.Container.Port),
							Name:          "tdset",
						}},
					}},
				},
			},
		},
	}

	// Set the ownerRef for the Deployment
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/
	if err := ctrl.SetControllerReference(tdSet, dep, r.Scheme); err != nil {
		log.Error(err, "failed to set controller owner reference")
		return nil, err
	}

	return dep, nil
}

func (r *TDSetReconciler) DeploymentIfNotExist(
	ctx context.Context, req ctrl.Request,
	tdSet *schedulev1.TDSet,
) (bool, error) {
	log := log.FromContext(ctx)

	dep := &appsv1.Deployment{}
	err := r.Get(ctx, types.NamespacedName{Name: tdSet.Name, Namespace: tdSet.Namespace}, dep)
	if err != nil && apierrors.IsNotFound(err) {
		dep, err := r.Deployment(ctx, req, tdSet)
		if err != nil {
			log.Error(err, "Failed to define new Deployment resource for TDSet")

			err = r.SetCondition(
				ctx, req, tdSet, TypeAvailable,
				fmt.Sprintf("Failed to create Deployment for TDSet (%s): (%s)", tdSet.Name, err),
			)
			if err != nil {
				return false, err
			}
		}

		log.Info(
			"Creating a new Deployment",
			"Deployment.Namespace", dep.Namespace,
			"Deployment.Name", dep.Name,
		)

		err = r.Create(ctx, dep)
		if err != nil {
			log.Error(
				err, "Failed to create new Deployment",
				"Deployment.Namespace", dep.Namespace,
				"Deployment.Name", dep.Name,
			)
			return false, err
		}

		err = r.GetTDSet(ctx, req, tdSet)
		if err != nil {
			log.Error(err, "Failed to re-fetch TDSet")
			return false, err
		}

		err = r.SetCondition(
			ctx, req, tdSet, TypeProgressing,
			fmt.Sprintf("Created Deployment for the TDSet: (%s)", tdSet.Name),
		)
		if err != nil {
			return false, err
		}

		return true, nil
	}
	if err != nil {
		log.Error(err, "Failed to get Deployment")
		return false, err
	}

	return false, nil
}

func (r *TDSetReconciler) UpdateDeploymentReplica(
	ctx context.Context, req ctrl.Request,
	tdSet *schedulev1.TDSet,
) error {
	log := log.FromContext(ctx)

	dep := &appsv1.Deployment{}
	err := r.Get(ctx, types.NamespacedName{Name: tdSet.Name, Namespace: tdSet.Namespace}, dep)
	if err != nil {
		log.Error(err, "Failed to get Deployment")
		return err
	}

	replicas, err := r.GetExpectedReplica(ctx, req, tdSet)
	if err != nil {
		log.Error(err, "failed to get expected replica")
		return err
	}

	if replicas == *dep.Spec.Replicas {
		return nil
	}

	log.Info(
		"Updating a Deployment replica",
		"Deployment.Namespace", dep.Namespace,
		"Deployment.Name", dep.Name,
	)

	dep.Spec.Replicas = &replicas
	err = r.Update(ctx, dep)
	if err != nil {
		log.Error(
			err, "Failed to update Deployment",
			"Deployment.Namespace", dep.Namespace,
			"Deployment.Name", dep.Name,
		)

		err = r.GetTDSet(ctx, req, tdSet)
		if err != nil {
			log.Error(err, "Failed to re-fetch TDSet")
			return err
		}

		err = r.SetCondition(
			ctx, req, tdSet, TypeProgressing,
			fmt.Sprintf("Failed to update replica for the TDSet (%s): (%s)", tdSet.Name, err),
		)
		if err != nil {
			return err
		}
		return nil
	}

	err = r.GetTDSet(ctx, req, tdSet)
	if err != nil {
		log.Error(err, "Failed to re-fetch TDSet")
		return err
	}

	err = r.SetCondition(
		ctx, req, tdSet, TypeProgressing,
		fmt.Sprintf("Updated replica for the TDSet (%s)", tdSet.Name),
	)
	if err != nil {
		return err
	}

	return nil
}

View File

@@ -1,30 +0,0 @@
package controllers

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"

	schedulev1 "github.com/baschno/tdset-operator/api/v1"
)

func (r *TDSetReconciler) GetExpectedReplica(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) (int32, error) {
	log := log.FromContext(ctx)

	if tdSet.Spec.SchedulingConfig != nil && len(tdSet.Spec.SchedulingConfig) != 0 {
		now := time.Now()
		hour := now.Hour()
		log.Info("current server", "hour", hour, "time", now)

		for _, config := range tdSet.Spec.SchedulingConfig {
			if hour >= config.StartTime && hour < config.EndTime {
				return int32(config.Replica), nil
			}
		}
	}

	return tdSet.Spec.DefaultReplica, nil
}
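The deleted function above encodes the scheduling rule this operator used: the current hour is matched against half-open [startTime, endTime) windows, the first hit wins, and otherwise the default replica count applies. A standalone restatement with a worked example (illustration only, not repository code):

```go
package main

import "fmt"

// expectedReplicas restates the removed GetExpectedReplica rule without the
// controller plumbing: inside the window use its replica count, else the default.
func expectedReplicas(hour, start, end int, windowReplicas, defaultReplicas int32) int32 {
	if hour >= start && hour < end {
		return windowReplicas
	}
	return defaultReplicas
}

func main() {
	// With a 9-17 window of 3 replicas and a default of 1:
	fmt.Println(expectedReplicas(14, 9, 17, 3, 1)) // 3 (inside the window)
	fmt.Println(expectedReplicas(20, 9, 17, 3, 1)) // 1 (outside, default applies)
}
```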

View File

@@ -1,76 +0,0 @@
package controllers

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"

	schedulev1 "github.com/baschno/tdset-operator/api/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// ConditionStatus defines TDSet condition status.
type ConditionStatus string

// Defines TDSet condition status.
const (
	TypeAvailable   ConditionStatus = "Available"
	TypeProgressing ConditionStatus = "Progressing"
	TypeDegraded    ConditionStatus = "Degraded"
)

// GetTDSet gets the TDSet from api server.
func (r *TDSetReconciler) GetTDSet(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) error {
	err := r.Get(ctx, req.NamespacedName, tdSet)
	if err != nil {
		return err
	}
	return nil
}

// SetInitialCondition sets the status condition of the TDSet to available initially
// when no condition exists yet.
func (r *TDSetReconciler) SetInitialCondition(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) error {
	if tdSet.Status.Conditions != nil || len(tdSet.Status.Conditions) != 0 {
		return nil
	}

	err := r.SetCondition(ctx, req, tdSet, TypeAvailable, "Starting reconciliation")
	return err
}

// SetCondition sets the status condition of the TDSet.
func (r *TDSetReconciler) SetCondition(
	ctx context.Context, req ctrl.Request,
	tdSet *schedulev1.TDSet, condition ConditionStatus,
	message string,
) error {
	log := log.FromContext(ctx)

	meta.SetStatusCondition(
		&tdSet.Status.Conditions,
		metav1.Condition{
			Type:   string(condition),
			Status: metav1.ConditionUnknown, Reason: "Reconciling",
			Message: message,
		},
	)

	if err := r.Status().Update(ctx, tdSet); err != nil {
		log.Error(err, "Failed to update TDSet status")
		return err
	}

	if err := r.Get(ctx, req.NamespacedName, tdSet); err != nil {
		log.Error(err, "Failed to re-fetch TDSet")
		return err
	}

	return nil
}

View File

@@ -18,9 +18,7 @@ package controller
import (
	"context"
-	"time"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -29,10 +27,6 @@ import (
schedulev1 "github.com/baschno/tdset-operator/api/v1" schedulev1 "github.com/baschno/tdset-operator/api/v1"
) )
const (
DefaultReconciliationInterval = 5
)
// TDSetReconciler reconciles a TDSet object // TDSetReconciler reconciles a TDSet object
type TDSetReconciler struct { type TDSetReconciler struct {
client.Client client.Client
@@ -42,7 +36,6 @@ type TDSetReconciler struct {
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets/finalizers,verbs=update
-// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -54,59 +47,11 @@ type TDSetReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *TDSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	log := log.FromContext(ctx)
-	log.Info("Starting reconciliation")
-
-	tdSet := &schedulev1.TDSet{}
-
-	// Get TDSet
-	err := r.GetTDSet(ctx, req, tdSet)
-	if err != nil {
-		if apierrors.IsNotFound(err) {
-			log.Info("TDSet not found - ignoring since object must be deleted")
-			return ctrl.Result{}, nil
-		}
-		log.Error(err, "Failed to get TDSet")
-		return ctrl.Result{}, err
-	}
-
-	// Try to set initial status
-	err = r.SetInitialCondition(ctx, req, tdSet)
-	if err != nil {
-		log.Error(err, "Failed to set initial condition")
-		return ctrl.Result{}, err
-	}
-
-	// TODO Delete finalizer
-
-	// Deployment if not exist
-	ok, err := r.DeploymentIfNotExist(ctx, req, tdSet)
-	if err != nil {
-		log.Error(err, "Failed to check deployment for TDSet")
-		return ctrl.Result{}, err
-	}
-	if ok {
-		return ctrl.Result{RequeueAfter: time.Minute}, nil
-	}
-
-	// Update deployment replica if mismatched
-	err = r.UpdateDeploymentReplica(ctx, req, tdSet)
-	if err != nil {
-		log.Log.Error(err, "Failed to update deployment replica for TDSet")
-		return ctrl.Result{}, err
-	}
-
-	interval := DefaultReconciliationInterval
-	if tdSet.Spec.IntervalMint != 0 {
-		interval = int(tdSet.Spec.IntervalMint)
-	}
-
-	log.Info("Reconciliation done", "RequeueAfter", interval)
-	return ctrl.Result{RequeueAfter: time.Duration(time.Minute * time.Duration(interval))}, nil
+	_ = log.FromContext(ctx)
+
+	// TODO(user): your logic here
+
+	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.

View File

@@ -27,6 +27,6 @@ import (
// Run e2e tests using the Ginkgo runner.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
-	_, _ = fmt.Fprintf(GinkgoWriter, "Starting tdset-operator suite\n")
+	_, _ = fmt.Fprintf(GinkgoWriter, "Starting k8s-operator-playground suite\n")
	RunSpecs(t, "e2e suite")
}

View File

@@ -27,7 +27,7 @@ import (
"github.com/baschno/tdset-operator/test/utils" "github.com/baschno/tdset-operator/test/utils"
) )
const namespace = "tdset-operator-system" const namespace = "k8s-operator-playground-system"
var _ = Describe("controller", Ordered, func() { var _ = Describe("controller", Ordered, func() {
BeforeAll(func() { BeforeAll(func() {
@@ -60,7 +60,7 @@ var _ = Describe("controller", Ordered, func() {
		var err error
		// projectimage stores the name of the image used in the example
-		var projectimage = "example.com/tdset-operator:v0.0.1"
+		var projectimage = "example.com/k8s-operator-playground:v0.0.1"

		By("building the manager(Operator) image")
		cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage))