Configure Loadbalancer back-end pool only for controlplane machines #2004

Merged
2 changes: 1 addition & 1 deletion cloud/scope/powervs_machine.go
@@ -962,7 +962,7 @@ func (m *PowerVSMachineScope) GetMachineInternalIP() string {
return ""
}

// CreateVPCLoadBalancerPoolMember creates a member in load balaner pool.
// CreateVPCLoadBalancerPoolMember creates a member in load balancer pool.
func (m *PowerVSMachineScope) CreateVPCLoadBalancerPoolMember() (*vpcv1.LoadBalancerPoolMember, error) { //nolint:gocyclo
loadBalancers := make([]infrav1beta2.VPCLoadBalancerSpec, 0)
if len(m.IBMPowerVSCluster.Spec.LoadBalancers) == 0 {
27 changes: 20 additions & 7 deletions controllers/ibmpowervsmachine_controller.go
@@ -198,6 +198,19 @@ func (r *IBMPowerVSMachineReconciler) getOrCreate(scope *scope.PowerVSMachineSco
return instance, err
}

// handleLoadBalancerPoolMemberConfiguration handles loadbalancer pool member creation flow.
func (r *IBMPowerVSMachineReconciler) handleLoadBalancerPoolMemberConfiguration(machineScope *scope.PowerVSMachineScope) (ctrl.Result, error) {
poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember()
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create loadbalancer pool member %s: %w", machineScope.IBMPowerVSMachine.Name, err)
}
if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) {
return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
}

return ctrl.Result{}, nil
}

func (r *IBMPowerVSMachineReconciler) reconcileNormal(machineScope *scope.PowerVSMachineScope) (ctrl.Result, error) {
Contributor:

We should create another issue for refactoring these functions to accept the context from the caller, like reconcileNormal(ctx context.Context, machineScope *scope.PowerVSMachineScope).
We are not fully utilizing contextual logging; maybe this can be the first step.

Contributor (Author):

Issue created - #2018.
Updated the portions that used machineScope.Info to use contextual logging.

Contributor:

Thank you.
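
For reference, a minimal sketch of the refactor suggested in this thread (tracked in #2018): reconcileNormal taking the caller's context and pulling a logger from it with controller-runtime's ctrl.LoggerFrom, instead of logging through machineScope. Illustrative only, not part of this PR:

// Sketch only: assumes the reconciler's existing imports (context, ctrl, scope, util).
func (r *IBMPowerVSMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.PowerVSMachineScope) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Reconciling IBMPowerVSMachine")

	if util.IsControlPlaneMachine(machineScope.Machine) {
		log.Info("Configuring loadbalancer configuration for control plane machine", "machineName", machineScope.IBMPowerVSMachine.Name)
		return r.handleLoadBalancerPoolMemberConfiguration(machineScope)
	}
	log.Info("skipping loadbalancer configuration for worker machine", "machineName", machineScope.IBMPowerVSMachine.Name)

	return ctrl.Result{}, nil
}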

machineScope.Info("Reconciling IBMPowerVSMachine")

@@ -286,15 +299,15 @@ func (r *IBMPowerVSMachineReconciler) reconcileNormal(machineScope *scope.PowerV
machineScope.Info("updating loadbalancer for machine", "name", machineScope.IBMPowerVSMachine.Name)
internalIP := machineScope.GetMachineInternalIP()
if internalIP == "" {
machineScope.Info("Unable to update the LoadBalancer, Machine internal IP not yet set", "machine name", machineScope.IBMPowerVSMachine.Name)
machineScope.Info("Unable to update the LoadBalancer, Machine internal IP not yet set", "machineName", machineScope.IBMPowerVSMachine.Name)
return ctrl.Result{}, nil
}
poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember()
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed CreateVPCLoadBalancerPoolMember %s: %w", machineScope.IBMPowerVSMachine.Name, err)
}
if poolMember != nil && *poolMember.ProvisioningStatus != string(infrav1beta2.VPCLoadBalancerStateActive) {
return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil

if util.IsControlPlaneMachine(machineScope.Machine) {
machineScope.Info("Configuring loadbalancer configuration for control plane machine", "machineName", machineScope.IBMPowerVSMachine.Name)
return r.handleLoadBalancerPoolMemberConfiguration(machineScope)
}
machineScope.Info("skipping loadbalancer configuration for worker machine", "machineName", machineScope.IBMPowerVSMachine.Name)

return ctrl.Result{}, nil
}
417 changes: 349 additions & 68 deletions controllers/ibmpowervsmachine_controller_test.go
@@ -22,7 +22,6 @@ import (
"testing"
"time"

"github.com/IBM-Cloud/power-go-client/power/models"
"go.uber.org/mock/gomock"

corev1 "k8s.io/api/core/v1"
@@ -33,18 +32,24 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"

capiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"

"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

infrav1beta2 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope"
"sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs"
"sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/powervs/mock"
mockVPC "sigs.k8s.io/cluster-api-provider-ibmcloud/pkg/cloud/services/vpc/mock"

"github.com/IBM-Cloud/power-go-client/power/models"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/vpc-go-sdk/vpcv1"
. "github.com/onsi/gomega"
)

@@ -275,14 +280,13 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)

pvsmachine := newIBMPowerVSMachine()

machineScope = &scope.PowerVSMachineScope{
Logger: klog.Background(),
IBMPowerVSClient: mockpowervs,
IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer},
},
},
Logger: klog.Background(),
IBMPowerVSClient: mockpowervs,
IBMPowerVSMachine: pvsmachine,
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{},
}
_, err := reconciler.reconcileDelete(machineScope)
@@ -316,16 +320,8 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)

secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "bootsecret",
Namespace: "default",
},
Data: map[string][]byte{
"value": []byte("user data"),
},
}
secret := newSecret()
machine := newMachine()

mockClient := fake.NewClientBuilder().WithObjects([]client.Object{secret}...).Build()
machineScope = &scope.PowerVSMachineScope{
@@ -343,16 +339,7 @@ func TestIBMPowerVSMachineReconciler_Delete(t *testing.T) {
},
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{},
DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL),
Machine: &capiv1beta1.Machine{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
},
Spec: capiv1beta1.MachineSpec{
Bootstrap: capiv1beta1.Bootstrap{
DataSecretName: ptr.To("bootsecret"),
},
},
},
Machine: machine,
}
mockpowervs.EXPECT().DeleteInstance(machineScope.IBMPowerVSMachine.Status.InstanceID).Return(nil)
_, err := reconciler.reconcileDelete(machineScope)
@@ -368,12 +355,14 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) {
mockCtrl *gomock.Controller
machineScope *scope.PowerVSMachineScope
reconciler IBMPowerVSMachineReconciler
mockvpc *mockVPC.MockVpc
)

setup := func(t *testing.T) {
t.Helper()
mockCtrl = gomock.NewController(t)
mockpowervs = mock.NewMockPowerVS(mockCtrl)
mockvpc = mockVPC.NewMockVpc(mockCtrl)
recorder := record.NewFakeRecorder(2)
reconciler = IBMPowerVSMachineReconciler{
Client: testEnv.Client,
@@ -455,6 +444,8 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) {
setup(t)
t.Cleanup(teardown)
var instances = &models.PVMInstances{}
machine := newMachine()
pvsMachine := newIBMPowerVSMachine()
mockclient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects().Build()
machineScope = &scope.PowerVSMachineScope{
Logger: klog.Background(),
@@ -464,80 +455,237 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) {
InfrastructureReady: true,
},
},
Machine: &capiv1beta1.Machine{
Spec: capiv1beta1.MachineSpec{
Bootstrap: capiv1beta1.Bootstrap{
DataSecretName: ptr.To("data-secret"),
},
Machine: machine,
IBMPowerVSMachine: pvsMachine,
IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{
Status: infrav1beta2.IBMPowerVSImageStatus{
Ready: true,
},
},
IBMPowerVSMachine: &infrav1beta2.IBMPowerVSMachine{
ObjectMeta: metav1.ObjectMeta{
Name: *ptr.To("capi-test-machine"),
Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer},
IBMPowerVSClient: mockpowervs,
}
mockpowervs.EXPECT().GetAllInstance().Return(instances, nil)

result, err := reconciler.reconcileNormal(machineScope)
g.Expect(err).To(HaveOccurred())
g.Expect(result.RequeueAfter).To(BeZero())
g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer))
expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.InstanceProvisionFailedReason}})
})

t.Run("Should fail reconcile if creation of the load balancer pool member is unsuccessful", func(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)

secret := newSecret()
pvsmachine := newIBMPowerVSMachine()
machine := newMachine()
machine.Labels = map[string]string{
"cluster.x-k8s.io/control-plane": "true",
}

mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build()
machineScope = &scope.PowerVSMachineScope{
Contributor:

Can't we call NewPowerVSMachineScope()?

Contributor (Author):

Correct me if I'm wrong, but based on issue #1937, it's preferred that unit tests not rely on the New functions, as their flow shouldn't be within the scope of the parent test.

Contributor:

OK. It would be good if we can reduce the number of duplicate lines where possible.

Contributor (Author):

Reduced the overall lines of code.

Logger: klog.Background(),
Client: mockclient,

Cluster: &capiv1beta1.Cluster{
Status: capiv1beta1.ClusterStatus{
InfrastructureReady: true,
},
},
Machine: machine,
IBMPowerVSMachine: pvsmachine,
IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{
Status: infrav1beta2.IBMPowerVSImageStatus{
Ready: true,
},
},
IBMVPCClient: mockvpc,
IBMPowerVSClient: mockpowervs,
DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL),
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"powervs.cluster.x-k8s.io/create-infra": "true",
},
},
Spec: infrav1beta2.IBMPowerVSClusterSpec{
LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{
{
Name: "capi-test-lb",
},
},
},
Status: infrav1beta2.IBMPowerVSClusterStatus{
LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{
"capi-test-lb": {
ID: ptr.To("capi-test-lb-id"),
},
},
},
},
}

instanceReferences := &models.PVMInstances{
PvmInstances: []*models.PVMInstanceReference{
{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
},
},
}
instance := &models.PVMInstance{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
Status: ptr.To("ACTIVE"),
Networks: []*models.PVMInstanceNetwork{
{
IPAddress: "192.168.7.1",
},
},
}

loadBalancer := &vpcv1.LoadBalancer{
ID: core.StringPtr("capi-test-lb-id"),
ProvisioningStatus: core.StringPtr("active"),
Name: core.StringPtr("capi-test-lb-name"),
}
mockpowervs.EXPECT().GetAllInstance().Return(instances, nil)

mockpowervs.EXPECT().GetAllInstance().Return(instanceReferences, nil)
mockpowervs.EXPECT().GetInstance(gomock.AssignableToTypeOf("capi-test-machine-id")).Return(instance, nil)
mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil)
result, err := reconciler.reconcileNormal(machineScope)
g.Expect(err).To(HaveOccurred())
g.Expect(err).ToNot(BeNil())
g.Expect(result.Requeue).To((BeFalse()))
g.Expect(result.RequeueAfter).To(BeZero())
g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer))
expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionFalse, capiv1beta1.ConditionSeverityError, infrav1beta2.InstanceProvisionFailedReason}})
expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}})
})
t.Run("Should reconcile IBMPowerVSMachine instance creation in different states", func(t *testing.T) {

t.Run("Should requeue if the load balancer pool member is created successfully, but its provisioning status is not active", func(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
capiv1beta1.ClusterNameLabel: "powervs-cluster",

secret := newSecret()
pvsmachine := newIBMPowerVSMachine()
machine := newMachine()
machine.Labels = map[string]string{
"cluster.x-k8s.io/control-plane": "true",
}

mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build()
machineScope = &scope.PowerVSMachineScope{
Logger: klog.Background(),
Client: mockclient,

Cluster: &capiv1beta1.Cluster{
Status: capiv1beta1.ClusterStatus{
InfrastructureReady: true,
},
Name: "bootsecret",
Namespace: "default",
},
Data: map[string][]byte{
"value": []byte("user data"),
Machine: machine,
IBMPowerVSMachine: pvsmachine,
IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{
Status: infrav1beta2.IBMPowerVSImageStatus{
Ready: true,
},
},
IBMVPCClient: mockvpc,
IBMPowerVSClient: mockpowervs,
DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL),
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"powervs.cluster.x-k8s.io/create-infra": "true",
},
},
Spec: infrav1beta2.IBMPowerVSClusterSpec{
LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{
{
Name: "capi-test-lb",
},
},
},
Status: infrav1beta2.IBMPowerVSClusterStatus{
LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{
"capi-test-lb": {
ID: ptr.To("capi-test-lb-id"),
},
},
},
},
}

pvsmachine := &infrav1beta2.IBMPowerVSMachine{
ObjectMeta: metav1.ObjectMeta{
Name: *ptr.To("capi-test-machine"),
Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer},
instanceReferences := &models.PVMInstances{
PvmInstances: []*models.PVMInstanceReference{
{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
},
},
Spec: infrav1beta2.IBMPowerVSMachineSpec{
MemoryGiB: 8,
Processors: intstr.FromString("0.5"),
Image: &infrav1beta2.IBMPowerVSResourceReference{
ID: ptr.To("capi-image-id"),
}
instance := &models.PVMInstance{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
Status: ptr.To("ACTIVE"),
Networks: []*models.PVMInstanceNetwork{
{
IPAddress: "192.168.7.1",
},
Network: infrav1beta2.IBMPowerVSResourceReference{
ID: ptr.To("capi-net-id"),
},
}

loadBalancer := &vpcv1.LoadBalancer{
ID: core.StringPtr("capi-test-lb-id"),
ProvisioningStatus: core.StringPtr("active"),
Name: core.StringPtr("capi-test-lb-name"),
Pools: []vpcv1.LoadBalancerPoolReference{
{
ID: core.StringPtr("capi-test-lb-pool-id"),
Name: core.StringPtr("capi-test-lb-pool-name"),
},
ServiceInstanceID: *ptr.To("service-instance-1"),
},
}

machine := &capiv1beta1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "owner-machine",
Namespace: "default"},
Spec: capiv1beta1.MachineSpec{
Bootstrap: capiv1beta1.Bootstrap{
DataSecretName: ptr.To("bootsecret"),
loadBalancerPoolMemberCollection := &vpcv1.LoadBalancerPoolMemberCollection{
Members: []vpcv1.LoadBalancerPoolMember{
{
ID: core.StringPtr("capi-test-lb-pool-id"),
},
},
}

loadBalancerPoolMember := &vpcv1.LoadBalancerPoolMember{
ID: core.StringPtr("capi-test-lb-pool-member-id"),
ProvisioningStatus: core.StringPtr("update-pending"),
}

mockpowervs.EXPECT().GetAllInstance().Return(instanceReferences, nil)
mockpowervs.EXPECT().GetInstance(gomock.AssignableToTypeOf("capi-test-machine-id")).Return(instance, nil)
mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil)
mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil)
mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(loadBalancerPoolMember, &core.DetailedResponse{}, nil)
mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil)
result, err := reconciler.reconcileNormal(machineScope)
g.Expect(err).To(BeNil())
g.Expect(result.RequeueAfter).To(Not(BeZero()))
g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(true))
g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer))
expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}})
})

t.Run("Should reconcile IBMPowerVSMachine instance creation in different states", func(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)

secret := newSecret()
pvsmachine := newIBMPowerVSMachine()
machine := newMachine()

mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build()
machineScope = &scope.PowerVSMachineScope{
Logger: klog.Background(),
@@ -629,6 +777,90 @@ func TestIBMPowerVSMachineReconciler_ReconcileOperations(t *testing.T) {
})
})
})

t.Run("Should skip creation of loadbalancer pool member if not control plane machine", func(t *testing.T) {
g := NewWithT(t)
setup(t)
t.Cleanup(teardown)
secret := newSecret()
pvsmachine := newIBMPowerVSMachine()
pvsmachine.ObjectMeta.Labels = map[string]string{
"node-role.kubernetes.io/worker": "true",
}
machine := newMachine()

mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build()

machineScope = &scope.PowerVSMachineScope{
Logger: klog.Background(),
Client: mockclient,

Cluster: &capiv1beta1.Cluster{
Status: capiv1beta1.ClusterStatus{
InfrastructureReady: true,
},
},
Machine: machine,
IBMPowerVSMachine: pvsmachine,
IBMPowerVSImage: &infrav1beta2.IBMPowerVSImage{
Status: infrav1beta2.IBMPowerVSImageStatus{
Ready: true,
},
},
IBMPowerVSClient: mockpowervs,
DHCPIPCacheStore: cache.NewTTLStore(powervs.CacheKeyFunc, powervs.CacheTTL),
IBMPowerVSCluster: &infrav1beta2.IBMPowerVSCluster{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"powervs.cluster.x-k8s.io/create-infra": "true",
},
},
Spec: infrav1beta2.IBMPowerVSClusterSpec{
LoadBalancers: []infrav1beta2.VPCLoadBalancerSpec{
{
Name: "capi-test-lb",
},
},
},
Status: infrav1beta2.IBMPowerVSClusterStatus{
LoadBalancers: map[string]infrav1beta2.VPCLoadBalancerStatus{
"capi-test-lb": {
ID: ptr.To("capi-test-lb-id"),
},
},
},
},
}

instanceReferences := &models.PVMInstances{
PvmInstances: []*models.PVMInstanceReference{
{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
},
},
}
instance := &models.PVMInstance{
PvmInstanceID: ptr.To("capi-test-machine-id"),
ServerName: ptr.To("capi-test-machine"),
Status: ptr.To("ACTIVE"),
Networks: []*models.PVMInstanceNetwork{
{
IPAddress: "192.168.7.1",
},
},
}

mockpowervs.EXPECT().GetAllInstance().Return(instanceReferences, nil)
mockpowervs.EXPECT().GetInstance(gomock.AssignableToTypeOf("capi-test-machine-id")).Return(instance, nil)
result, err := reconciler.reconcileNormal(machineScope)
g.Expect(err).To(BeNil())
g.Expect(result.Requeue).To(BeFalse())
g.Expect(result.RequeueAfter).To(BeZero())
g.Expect(machineScope.IBMPowerVSMachine.Status.Ready).To(Equal(true))
g.Expect(machineScope.IBMPowerVSMachine.Finalizers).To(ContainElement(infrav1beta2.IBMPowerVSMachineFinalizer))
expectConditions(g, machineScope.IBMPowerVSMachine, []conditionAssertion{{infrav1beta2.InstanceReadyCondition, corev1.ConditionTrue, "", ""}})
})
}

type conditionAssertion struct {
@@ -662,3 +894,52 @@ func cleanupObject(g *WithT, obj client.Object) {
g.Expect(testEnv.Cleanup(ctx, obj)).To(Succeed())
}
}

func newSecret() *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
capiv1beta1.ClusterNameLabel: "powervs-cluster",
},
Name: "bootsecret",
Namespace: "default",
},
Data: map[string][]byte{
"value": []byte("user data"),
},
}
}

func newIBMPowerVSMachine() *infrav1beta2.IBMPowerVSMachine {
return &infrav1beta2.IBMPowerVSMachine{
ObjectMeta: metav1.ObjectMeta{
Name: *ptr.To("capi-test-machine"),
Finalizers: []string{infrav1beta2.IBMPowerVSMachineFinalizer},
},
Spec: infrav1beta2.IBMPowerVSMachineSpec{
MemoryGiB: 8,
Processors: intstr.FromString("0.5"),
Image: &infrav1beta2.IBMPowerVSResourceReference{
ID: ptr.To("capi-image-id"),
},
Network: infrav1beta2.IBMPowerVSResourceReference{
ID: ptr.To("capi-net-id"),
},
ServiceInstanceID: *ptr.To("service-instance-1"),
},
}
}

func newMachine() *capiv1beta1.Machine {
return &capiv1beta1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "owner-machine",
Namespace: "default",
},
Spec: capiv1beta1.MachineSpec{
Bootstrap: capiv1beta1.Bootstrap{
DataSecretName: ptr.To("bootsecret"),
},
},
}
}
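
To tie back to the duplicate-lines discussion in the review: the test cases above build their fixtures from these helpers and override only what an individual case needs, for example (mirroring the control-plane test earlier in the diff):

	secret := newSecret()
	pvsmachine := newIBMPowerVSMachine()
	machine := newMachine()
	// Only control plane machines get a load balancer pool member, so mark the owning Machine accordingly.
	machine.Labels = map[string]string{"cluster.x-k8s.io/control-plane": "true"}
	mockclient := fake.NewClientBuilder().WithObjects([]client.Object{secret, pvsmachine, machine}...).Build()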