Compare commits

...

2 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Bastian Schnorbus | ed1aad2e64 | savegame | 2025-05-07 19:15:23 +02:00 |
| Bastian Schnorbus | cbefdfba7b | new attempt | 2025-05-06 23:03:51 +02:00 |
28 changed files with 95 additions and 401 deletions

View File

@@ -28,8 +28,8 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# rs/tdset-operator-bundle:$VERSION and rs/tdset-operator-catalog:$VERSION.
IMAGE_TAG_BASE ?= rs/tdset-operator
# rs/k8s-operator-playground-bundle:$VERSION and rs/k8s-operator-playground-catalog:$VERSION.
IMAGE_TAG_BASE ?= rs/k8s-operator-playground
# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
@@ -48,7 +48,7 @@ endif
# Set the Operator SDK version to use. By default, what is installed on the system is used.
# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit.
OPERATOR_SDK_VERSION ?= v1.39.1
OPERATOR_SDK_VERSION ?= v1.39.2
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
@@ -159,10 +159,10 @@ PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
docker-buildx: ## Build and push docker image for the manager for cross-platform support
# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- $(CONTAINER_TOOL) buildx create --name tdset-operator-builder
$(CONTAINER_TOOL) buildx use tdset-operator-builder
- $(CONTAINER_TOOL) buildx create --name k8s-operator-playground-builder
$(CONTAINER_TOOL) buildx use k8s-operator-playground-builder
- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
- $(CONTAINER_TOOL) buildx rm tdset-operator-builder
- $(CONTAINER_TOOL) buildx rm k8s-operator-playground-builder
rm Dockerfile.cross
.PHONY: build-installer

View File

@@ -8,7 +8,7 @@ layout:
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: tdset-operator
projectName: k8s-operator-playground
repo: github.com/baschno/tdset-operator
resources:
- api:

View File

@@ -1,4 +1,4 @@
# tdset-operator
# k8s-operator-playground
// TODO(user): Add simple overview of use/purpose
## Description
@@ -16,7 +16,7 @@
**Build and push your image to the location specified by `IMG`:**
```sh
make docker-build docker-push IMG=<some-registry>/tdset-operator:tag
make docker-build docker-push IMG=<some-registry>/k8s-operator-playground:tag
```
**NOTE:** This image ought to be published in the personal registry you specified.
@@ -32,7 +32,7 @@ make install
**Deploy the Manager to the cluster with the image specified by `IMG`:**
```sh
make deploy IMG=<some-registry>/tdset-operator:tag
make deploy IMG=<some-registry>/k8s-operator-playground:tag
```
> **NOTE**: If you encounter RBAC errors, you may need to grant yourself cluster-admin
@@ -73,7 +73,7 @@ Following are the steps to build the installer and distribute this project to us
1. Build the installer for the image built and published in the registry:
```sh
make build-installer IMG=<some-registry>/tdset-operator:tag
make build-installer IMG=<some-registry>/k8s-operator-playground:tag
```
NOTE: The makefile target mentioned above generates an 'install.yaml'
@@ -86,7 +86,7 @@ its dependencies.
Users can just run kubectl apply -f <URL for YAML BUNDLE> to install the project, i.e.:
```sh
kubectl apply -f https://raw.githubusercontent.com/<org>/tdset-operator/<tag or branch>/dist/install.yaml
kubectl apply -f https://raw.githubusercontent.com/<org>/k8s-operator-playground/<tag or branch>/dist/install.yaml
```
## Contributing
@@ -112,3 +112,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
## What have I done?
Source: https://shahin-mahmud.medium.com/write-your-first-kubernetes-operator-in-go-177047337eae
1. Initialize the project:
   `operator-sdk init --domain rs --repo github.com/baschno/tdset-operator --plugins=go/v4`
2. Create the CRDs, Go types, controller, etc.:
   `operator-sdk create api --group schedule --version v1 --kind TDSet --resource --controller`
   - `--group` -> resource group name
   - `--resource` -> create the resource types w/o prompting
   - `--controller` -> create the controller w/o prompting
3. Make the changes to the Go types (see the sample manifest below).
4. Run `make generate`.
5. Run `make manifests`.
6. Adapt the reconciler in `internal/controller` -> `tdset_controller.go`.
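
For reference, a minimal manifest for the resulting TDSet kind might look like the sketch below (apiVersion, kind, and field names taken from the sample and CRD in this diff; the concrete values are illustrative assumptions):
```yaml
apiVersion: schedule.rs/v1
kind: TDSet
metadata:
  name: tdset-sample
spec:
  container:
    image: nginx:1.27   # illustrative image and port, not from this repo
    port: 80
  service:
    port: 80
  defaultReplicas: 1
  schedulingConfig:
    - startTime: 9      # scale to 3 replicas between 09:00 and 17:59
      endTime: 18
      replicas: 3
```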

View File

@@ -23,20 +23,18 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Important: Run "make" to regenerate code after modifying this file
// Container defines container related properties.
// Container-related properties
type Container struct {
Image string `json:"image"`
Port int `json:"port"`
}
// Service defines service related properties.
// Service-related properties
type Service struct {
Port int `json:"port"`
}
// SchedulingConfig defines scheduling related properties.
// Scheduling-related properties
type Scheduling struct {
// +kubebuilder:validation:Minimum=0
// +kubebuilder:validation:Maximum=23
@@ -45,20 +43,28 @@ type Scheduling struct {
// +kubebuilder:validation:Maximum=23
EndTime int `json:"endTime"`
// +kubebuilder:validation:Minimum=0
Replica int `json:"replica"`
// +kubebuilder:validation:Maximum=6
Replicas int `json:"replicas"`
}
// TDSetSpec defines the desired state of TDSet
type TDSetSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// +kubebuilder:validation:Required
Container Container `json:"container"`
// +kubebuilder:validation:Optional
Service Service `json:"service,omitempty"`
// +kubebuilder:validation:Required
SchedulingConfig []*Scheduling `json:"schedulingConfig"`
// +kubebuilder:validation:Required
// +kubebuilder:validation:Minimum=1
DefaultReplica int32 `json:"defaultReplica"`
// +kubebuilder:validation:Minimum=1
DefaultReplicas int32 `json:"defaultReplicas,omitempty"`
// +kubebuilder:validation:Optional
// +kubebuilder:validation:Minimum=1
// +kubebuilder:validation:Maximum=1440

View File

@@ -36,6 +36,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/webhook"
schedulev1 "github.com/baschno/tdset-operator/api/v1"
"github.com/baschno/tdset-operator/internal/controller"
// +kubebuilder:scaffold:imports
)

View File

@@ -40,7 +40,7 @@ spec:
description: TDSetSpec defines the desired state of TDSet
properties:
container:
description: Container defines container related properties.
description: Container-related properties
properties:
image:
type: string
@@ -50,9 +50,8 @@ spec:
- image
- port
type: object
defaultReplica:
defaultReplicas:
format: int32
minimum: 1
type: integer
intervalMint:
format: int32
@@ -61,13 +60,14 @@ spec:
type: integer
schedulingConfig:
items:
description: SchedulingConfig defines scheduling related properties.
description: Scheduling-related properties
properties:
endTime:
maximum: 23
minimum: 0
type: integer
replica:
replicas:
maximum: 6
minimum: 0
type: integer
startTime:
@@ -76,12 +76,12 @@ spec:
type: integer
required:
- endTime
- replica
- replicas
- startTime
type: object
type: array
service:
description: Service defines service related properties.
description: Service-related properties
properties:
port:
type: integer
@@ -90,7 +90,7 @@ spec:
type: object
required:
- container
- defaultReplica
- defaultReplicas
- schedulingConfig
type: object
status:
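
Given these bounds, the API server rejects out-of-range values at admission time. For example, a spec fragment like this sketch (values illustrative) would fail validation:
```yaml
schedulingConfig:
  - startTime: 25   # rejected: outside the 0-23 hour range
    endTime: 30     # rejected: outside the 0-23 hour range
    replicas: 9     # rejected: above the maximum of 6
```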

View File

@@ -1,12 +1,12 @@
# Adds namespace to all resources.
namespace: tdset-operator-system
namespace: k8s-operator-playground-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: tdset-operator-
namePrefix: k8s-operator-playground-
# Labels to add to all resources and selectors.
#labels:

View File

@@ -3,7 +3,7 @@ kind: Service
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-service
namespace: system

View File

@@ -3,7 +3,7 @@ kind: Namespace
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: system
---
@@ -14,7 +14,7 @@ metadata:
namespace: system
labels:
control-plane: controller-manager
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
spec:
selector:

View File

@@ -1,7 +1,7 @@
# These resources constitute the fully configured set of manifests
# used to generate the 'manifests/' directory in a bundle.
resources:
- bases/tdset-operator.clusterserviceversion.yaml
- bases/k8s-operator-playground.clusterserviceversion.yaml
- ../default
- ../samples
- ../scorecard

View File

@@ -5,7 +5,7 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: allow-metrics-traffic
namespace: system

View File

@@ -4,7 +4,7 @@ kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: controller-manager-metrics-monitor
namespace: system

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: leader-election-role
rules:

View File

@@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: leader-election-rolebinding
roleRef:

View File

@@ -4,18 +4,6 @@ kind: ClusterRole
metadata:
name: manager-role
rules:
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- schedule.rs
resources:

View File

@@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: manager-rolebinding
roleRef:

View File

@@ -2,7 +2,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: controller-manager
namespace: system

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: tdset-editor-role
rules:

View File

@@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: tdset-viewer-role
rules:

View File

@@ -2,7 +2,7 @@ apiVersion: schedule.rs/v1
kind: TDSet
metadata:
labels:
app.kubernetes.io/name: tdset-operator
app.kubernetes.io/name: k8s-operator-playground
app.kubernetes.io/managed-by: kustomize
name: tdset-sample
spec:

View File

@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- basic-check-spec
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: basic
test: basic-check-spec-test

View File

@@ -4,7 +4,7 @@
entrypoint:
- scorecard-test
- olm-bundle-validation
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: olm
test: olm-bundle-validation-test
@@ -14,7 +14,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-validation
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: olm
test: olm-crds-have-validation-test
@@ -24,7 +24,7 @@
entrypoint:
- scorecard-test
- olm-crds-have-resources
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: olm
test: olm-crds-have-resources-test
@@ -34,7 +34,7 @@
entrypoint:
- scorecard-test
- olm-spec-descriptors
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: olm
test: olm-spec-descriptors-test
@@ -44,7 +44,7 @@
entrypoint:
- scorecard-test
- olm-status-descriptors
image: quay.io/operator-framework/scorecard-test:v1.39.1
image: quay.io/operator-framework/scorecard-test:v1.39.2
labels:
suite: olm
test: olm-status-descriptors-test

View File

@@ -1,218 +0,0 @@
package controllers
import (
"context"
"fmt"
schedulev1 "github.com/baschno/tdset-operator/api/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func (r *TDSetReconciler) Deployment(
ctx context.Context, req ctrl.Request,
tdSet *schedulev1.TDSet,
) (*appsv1.Deployment, error) {
log := log.FromContext(ctx)
replicas, err := r.GetExpectedReplica(ctx, req, tdSet)
if err != nil {
log.Error(err, "failed to get expected replica")
return nil, err
}
labels := map[string]string{
"app.kubernetes.io/name": "TDSet",
"app.kubernetes.io/instance": tdSet.Name,
"app.kubernetes.io/version": "v1",
"app.kubernetes.io/part-of": "tdset-operator",
"app.kubernetes.io/created-by": "controller-manager",
}
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: tdSet.Name,
Namespace: tdSet.Namespace,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
Image: tdSet.Spec.Container.Image,
Name: tdSet.Name,
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{{
ContainerPort: int32(tdSet.Spec.Container.Port),
Name: "tdset",
}},
}},
},
},
},
}
// Set the ownerRef for the Deployment
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/
if err := ctrl.SetControllerReference(tdSet, dep, r.Scheme); err != nil {
log.Error(err, "failed to set controller owner reference")
return nil, err
}
return dep, nil
}
func (r *TDSetReconciler) DeploymentIfNotExist(
ctx context.Context, req ctrl.Request,
tdSet *schedulev1.TDSet,
) (bool, error) {
log := log.FromContext(ctx)
dep := &appsv1.Deployment{}
err := r.Get(ctx, types.NamespacedName{Name: tdSet.Name, Namespace: tdSet.Namespace}, dep)
if err != nil && apierrors.IsNotFound(err) {
dep, err := r.Deployment(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to define new Deployment resource for TDSet")
err = r.SetCondition(
ctx, req, tdSet, TypeAvailable,
fmt.Sprintf("Failed to create Deployment for TDSet (%s): (%s)", tdSet.Name, err),
)
if err != nil {
return false, err
}
}
log.Info(
"Creating a new Deployment",
"Deployment.Namespace", dep.Namespace,
"Deployment.Name", dep.Name,
)
err = r.Create(ctx, dep)
if err != nil {
log.Error(
err, "Failed to create new Deployment",
"Deployment.Namespace", dep.Namespace,
"Deployment.Name", dep.Name,
)
return false, err
}
err = r.GetTDSet(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to re-fetch TDSet")
return false, err
}
err = r.SetCondition(
ctx, req, tdSet, TypeProgressing,
fmt.Sprintf("Created Deployment for the TDSet: (%s)", tdSet.Name),
)
if err != nil {
return false, err
}
return true, nil
}
if err != nil {
log.Error(err, "Failed to get Deployment")
return false, err
}
return false, nil
}
func (r *TDSetReconciler) UpdateDeploymentReplica(
ctx context.Context, req ctrl.Request,
tdSet *schedulev1.TDSet,
) error {
log := log.FromContext(ctx)
dep := &appsv1.Deployment{}
err := r.Get(ctx, types.NamespacedName{Name: tdSet.Name, Namespace: tdSet.Namespace}, dep)
if err != nil {
log.Error(err, "Failed to get Deployment")
return err
}
replicas, err := r.GetExpectedReplica(ctx, req, tdSet)
if err != nil {
log.Error(err, "failed to get expected replica")
return err
}
if replicas == *dep.Spec.Replicas {
return nil
}
log.Info(
"Updating a Deployment replica",
"Deployment.Namespace", dep.Namespace,
"Deployment.Name", dep.Name,
)
dep.Spec.Replicas = &replicas
err = r.Update(ctx, dep)
if err != nil {
log.Error(
err, "Failed to update Deployment",
"Deployment.Namespace", dep.Namespace,
"Deployment.Name", dep.Name,
)
err = r.GetTDSet(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to re-fetch TDSet")
return err
}
err = r.SetCondition(
ctx, req, tdSet, TypeProgressing,
fmt.Sprintf("Failed to update replica for the TDSet (%s): (%s)", tdSet.Name, err),
)
if err != nil {
return err
}
return nil
}
err = r.GetTDSet(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to re-fetch TDSet")
return err
}
err = r.SetCondition(
ctx, req, tdSet, TypeProgressing,
fmt.Sprintf("Updated replica for the TDSet (%s)", tdSet.Name),
)
if err != nil {
return err
}
return nil
}
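
Rendered to YAML, the Deployment this builder produces comes out roughly as below (a sketch trimmed to the fields the code sets; angle-bracket placeholders stand for values taken from the TDSet, and the builder also sets version/part-of/created-by labels and an ownerReference):
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: <tdset-name>
  namespace: <tdset-namespace>
spec:
  replicas: 3                     # from GetExpectedReplica
  selector:
    matchLabels:
      app.kubernetes.io/name: TDSet
      app.kubernetes.io/instance: <tdset-name>
  template:
    metadata:
      labels:
        app.kubernetes.io/name: TDSet
        app.kubernetes.io/instance: <tdset-name>
    spec:
      containers:
        - name: <tdset-name>
          image: <spec.container.image>
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080   # spec.container.port
              name: tdset
```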

View File

@@ -1,30 +0,0 @@
package controllers
import (
"context"
"time"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log"
schedulev1 "github.com/baschno/tdset-operator/api/v1"
)
func (r *TDSetReconciler) GetExpectedReplica(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) (int32, error) {
log := log.FromContext(ctx)
if tdSet.Spec.SchedulingConfig != nil && len(tdSet.Spec.SchedulingConfig) != 0 {
now := time.Now()
hour := now.Hour()
log.Info("current server", "hour", hour, "time", now)
for _, config := range tdSet.Spec.SchedulingConfig {
if hour >= config.StartTime && hour < config.EndTime {
return int32(config.Replica), nil
}
}
}
return tdSet.Spec.DefaultReplica, nil
}
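
As a worked example of the window matching above (field names as this pre-rename file reads them; values illustrative): at server hour 14 the 9-18 window matches and its replica count wins, while at hour 20 no window matches and the default applies.
```yaml
schedulingConfig:
  - startTime: 9    # hour >= 9 && hour < 18 -> 4 replicas
    endTime: 18
    replica: 4
defaultReplica: 2   # any non-matching hour, e.g. 20
```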

View File

@@ -1,76 +0,0 @@
package controllers
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/log"
schedulev1 "github.com/baschno/tdset-operator/api/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
)
// ConditionStatus defines TDSet condition status.
type ConditionStatus string
// Defines TDSet condition status.
const (
TypeAvailable ConditionStatus = "Available"
TypeProgressing ConditionStatus = "Progressing"
TypeDegraded ConditionStatus = "Degraded"
)
// GetTDSet gets the TDSet from api server.
func (r *TDSetReconciler) GetTDSet(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) error {
err := r.Get(ctx, req.NamespacedName, tdSet)
if err != nil {
return err
}
return nil
}
// SetInitialCondition sets the status condition of the TDSet to available initially
// when no condition exists yet.
func (r *TDSetReconciler) SetInitialCondition(ctx context.Context, req ctrl.Request, tdSet *schedulev1.TDSet) error {
if tdSet.Status.Conditions != nil || len(tdSet.Status.Conditions) != 0 {
return nil
}
err := r.SetCondition(ctx, req, tdSet, TypeAvailable, "Starting reconciliation")
return err
}
// SetCondition sets the status condition of the TDSet.
func (r *TDSetReconciler) SetCondition(
ctx context.Context, req ctrl.Request,
tdSet *schedulev1.TDSet, condition ConditionStatus,
message string,
) error {
log := log.FromContext(ctx)
meta.SetStatusCondition(
&tdSet.Status.Conditions,
metav1.Condition{
Type: string(condition),
Status: metav1.ConditionUnknown, Reason: "Reconciling",
Message: message,
},
)
if err := r.Status().Update(ctx, tdSet); err != nil {
log.Error(err, "Failed to update TDSet status")
return err
}
if err := r.Get(ctx, req.NamespacedName, tdSet); err != nil {
log.Error(err, "Failed to re-fetch TDSet")
return err
}
return nil
}
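
After a pass through SetCondition, the status block on the TDSet reads roughly like this sketch (type and message vary per call; Status and Reason are fixed above to Unknown/Reconciling; lastTransitionTime etc. omitted):
```yaml
status:
  conditions:
    - type: Available
      status: "Unknown"
      reason: Reconciling
      message: Starting reconciliation
```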

View File

@@ -20,7 +20,6 @@ import (
"context"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -42,7 +41,6 @@ type TDSetReconciler struct {
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=schedule.rs,resources=tdsets/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
@@ -54,47 +52,54 @@ type TDSetReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
func (r *TDSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
// TODO(user): your logic here
log := log.FromContext(ctx)
log.Info("Starting reconciliation")
log.Info("starting reconciliation")
tdSet := &schedulev1.TDSet{}
// Get TDSet
// Get the TDSet
err := r.GetTDSet(ctx, req, tdSet)
if err != nil {
if apierrors.IsNotFound(err) {
log.Info("TDSet not found - ignoring since object must be deleted")
log.Info("TDSet resource not found. Ignoring since object must be deleted")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get TDSet")
return ctrl.Result{}, err
}
// Try to set initial status
// Try to set initial condition status
err = r.SetInitialCondition(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to set initial condition")
log.Error(err, "failed to set initial condition")
return ctrl.Result{}, err
}
// TODO Delete finalizer
// TODO: Delete finalizer
// Deployment if not exist
ok, err := r.DeploymentIfNotExist(ctx, req, tdSet)
if err != nil {
log.Error(err, "Failed to check deployment for TDSet")
log.Error(err, "failed to deploy deployment for TDSet")
return ctrl.Result{}, err
}
if ok {
return ctrl.Result{RequeueAfter: time.Minute}, nil
}
// Update deployment replica if mismatched
// Update deployment replica if mismatched.
err = r.UpdateDeploymentReplica(ctx, req, tdSet)
if err != nil {
log.Log.Error(err, "Failed to update deployment replica for TDSet")
log.Error(err, "failed to update deployment for TDSet")
return ctrl.Result{}, err
}
@@ -103,10 +108,9 @@ func (r *TDSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl
interval = int(tdSet.Spec.IntervalMint)
}
log.Info("Reconciliation done", "RequeueAfter", interval)
log.Info("ending reconciliation")
return ctrl.Result{RequeueAfter: time.Duration(time.Minute * time.Duration(interval))}, nil
}
// SetupWithManager sets up the controller with the Manager.
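
The requeue interval is driven by spec.intervalMint (minutes; the CRD bounds it to 1-1440 when set), so a TDSet can opt into a slower reconcile loop, e.g. this illustrative fragment:
```yaml
spec:
  intervalMint: 30   # requeue roughly every 30 minutes
```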

View File

@@ -27,6 +27,6 @@ import (
// Run e2e tests using the Ginkgo runner.
func TestE2E(t *testing.T) {
RegisterFailHandler(Fail)
_, _ = fmt.Fprintf(GinkgoWriter, "Starting tdset-operator suite\n")
_, _ = fmt.Fprintf(GinkgoWriter, "Starting k8s-operator-playground suite\n")
RunSpecs(t, "e2e suite")
}

View File

@@ -27,7 +27,7 @@ import (
"github.com/baschno/tdset-operator/test/utils"
)
const namespace = "tdset-operator-system"
const namespace = "k8s-operator-playground-system"
var _ = Describe("controller", Ordered, func() {
BeforeAll(func() {
@@ -60,7 +60,7 @@ var _ = Describe("controller", Ordered, func() {
var err error
// projectimage stores the name of the image used in the example
var projectimage = "example.com/tdset-operator:v0.0.1"
var projectimage = "example.com/k8s-operator-playground:v0.0.1"
By("building the manager(Operator) image")
cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage))