//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package shared

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
)

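// SetupSpecNamespace creates a namespace for hosting the test spec's resources
// and registers its watch-cancelling function for later cleanup.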
func SetupSpecNamespace(ctx context.Context, specName string, e2eCtx *E2EContext) *corev1.Namespace {
	Logf("Creating a namespace for hosting the %q test spec", specName)
	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
		Creator:   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
		ClientSet: e2eCtx.Environment.BootstrapClusterProxy.GetClientSet(),
		Name:      fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
		LogFolder: filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
	})

	e2eCtx.Environment.Namespaces[namespace] = cancelWatches

	return namespace
}

// DumpSpecResourcesAndCleanup dumps all the resources in the spec namespace.
// This includes OpenStack resources and all the CAPI/CAPO resources in Kubernetes.
// It then cleans up the cluster objects and the spec namespace itself.
func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, namespace *corev1.Namespace, e2eCtx *E2EContext) {
	Logf("Running DumpSpecResourcesAndCleanup for namespace %q", namespace.Name)
	// Dump all Cluster API related resources to artifacts before deleting them.
	cancelWatches := e2eCtx.Environment.Namespaces[namespace]

	dumpAllResources := func(directory ...string) {
		dumpSpecResources(ctx, e2eCtx, namespace, directory...)
		dumpOpenStack(ctx, e2eCtx, e2eCtx.Environment.BootstrapClusterProxy.GetName(), directory...)
	}

	dumpAllResources()

	if !e2eCtx.Settings.SkipCleanup {
		func() {
			defer func() {
				r := recover()
				if r == nil {
					return
				}

				// If we fail to delete the cluster, dump all resources again to a different directory before propagating the failure.
				dumpAllResources("deletion-failure")
				panic(r)
			}()
			framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
				Client:    e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
				Namespace: namespace.Name,
			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)
		}()

		Logf("Deleting namespace used for hosting the %q test spec", specName)
		framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
			Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
			Name:    namespace.Name,
		})
	}
	cancelWatches()
	delete(e2eCtx.Environment.Namespaces, namespace)
}

// ClusterForSpec returns the OpenStackCluster in the given namespace.
// Exactly one OpenStackCluster is expected; it is an error if zero or more
// than one are found.
func ClusterForSpec(ctx context.Context, e2eCtx *E2EContext, namespace *corev1.Namespace) (*infrav1.OpenStackCluster, error) {
	lister := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
	list := new(infrav1.OpenStackClusterList)
	if err := lister.List(ctx, list, client.InNamespace(namespace.GetName())); err != nil {
		return nil, fmt.Errorf("error listing clusters: %v", err)
	}
	if len(list.Items) != 1 {
		return nil, fmt.Errorf("expected 1 cluster but got %d: %v", len(list.Items), list.Items)
	}
	return &list.Items[0], nil
}

// dumpSpecResources dumps all CAPI/CAPO resources to yaml.
func dumpSpecResources(ctx context.Context, e2eCtx *E2EContext, namespace *corev1.Namespace, directory ...string) {
	paths := append([]string{e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName(), "resources"}, directory...)
	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
		Lister:    e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
		Namespace: namespace.Name,
		LogPath:   filepath.Join(paths...),
	})
}

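// Logf writes a timestamped, formatted log line to the Ginkgo writer.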
func Logf(format string, a ...interface{}) {
	fmt.Fprintf(GinkgoWriter, "["+time.Now().Format(time.RFC3339)+"] "+format+"\n", a...)
}

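// Debugf writes a timestamped, formatted debug line to the Ginkgo writer,
// but only when debug output is enabled.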
func Debugf(debug bool, format string, a ...interface{}) {
	if debug {
		fmt.Fprintf(GinkgoWriter, "[DEBUG] ["+time.Now().Format(time.RFC3339)+"] "+format+"\n", a...)
	}
}

// LoadE2EConfig loads the e2e config from the specified path.
func LoadE2EConfig(configPath string) *clusterctl.E2EConfig {
	config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath})
	Expect(config).ToNot(BeNil(), "Failed to load E2E config from %s", configPath)
	return config
}

// SetEnvVar sets an environment variable in the process. If marked private,
// the value is not printed.
func SetEnvVar(key, value string, private bool) {
	printableValue := "*******"
	if !private {
		printableValue = value
	}

	Logf("Setting environment variable: key=%s, value=%s", key, printableValue)
	_ = os.Setenv(key, value)
}

// getOpenStackClusterFromMachine gets the OpenStackCluster that is related to the given machine.
func getOpenStackClusterFromMachine(ctx context.Context, client client.Client, machine *clusterv1.Machine) (*infrav1.OpenStackCluster, error) {
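	// Fetch the CAPI Cluster that owns this machine.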
	key := types.NamespacedName{
		Namespace: machine.Namespace,
		Name:      machine.Spec.ClusterName,
	}
	cluster := &clusterv1.Cluster{}
	err := client.Get(ctx, key, cluster)
	if err != nil {
		return nil, err
	}

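	// Follow the cluster's infrastructureRef to find the OpenStackCluster.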
	key = types.NamespacedName{
		Namespace: cluster.Spec.InfrastructureRef.Namespace,
		Name:      cluster.Spec.InfrastructureRef.Name,
	}
	openStackCluster := &infrav1.OpenStackCluster{}
	err = client.Get(ctx, key, openStackCluster)
	return openStackCluster, err
}

// GetIDFromProviderID returns the server ID part of a provider ID string.
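// A provider ID has the form "openstack://<region>/<server-id>", where the
// region may be empty; e.g. both "openstack://RegionOne/<server-id>" and
// "openstack:///<server-id>" yield "<server-id>".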
func GetIDFromProviderID(providerID string) string {
	providerIDSplit := strings.SplitN(providerID, "://", 2)
	Expect(providerIDSplit[0]).To(Equal("openstack"))
	providerIDPathSplit := strings.SplitN(providerIDSplit[1], "/", 2)
	// providerIDPathSplit[0] contains the region name, which may be empty.
	return providerIDPathSplit[1]
}

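// OpenStackLogCollector collects logs from OpenStack servers for the CAPI e2e
// test framework.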
type OpenStackLogCollector struct {
	E2EContext *E2EContext
}

// CollectMachineLog gets logs for the OpenStack resources related to the given machine.
func (o OpenStackLogCollector) CollectMachineLog(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine, outputPath string) error {
	Logf("Collecting logs for machine %q and storing them in %q", m.ObjectMeta.Name, outputPath)

	if err := os.MkdirAll(outputPath, 0o750); err != nil {
		return fmt.Errorf("couldn't create directory %q for logs: %s", outputPath, err)
	}

	if m.Spec.ProviderID == nil {
		return fmt.Errorf("unable to get logs for machine since it has no provider ID")
	}
	providerID := GetIDFromProviderID(*m.Spec.ProviderID)

	consoleLog, err := GetOpenStackServerConsoleLog(o.E2EContext, providerID)
	if err != nil {
		return fmt.Errorf("error getting console log for machine: %s", err)
	}
	logFile := path.Join(outputPath, "console.log")
	if err := os.WriteFile(logFile, []byte(consoleLog), 0o600); err != nil {
		return fmt.Errorf("error writing log file: %s", err)
	}
	Logf("Console log for machine %q saved", m.Name)

	openStackCluster, err := getOpenStackClusterFromMachine(ctx, managementClusterClient, m)
	if err != nil {
		return fmt.Errorf("error getting OpenStackCluster for Machine: %s", err)
	}

	if len(m.Status.Addresses) < 1 {
		return fmt.Errorf("unable to get logs for machine since it has no address")
	}
	ip := m.Status.Addresses[0].Address

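	// Fetch the server from OpenStack so its details can be archived alongside the logs.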
	srv, err := GetOpenStackServerWithIP(o.E2EContext, providerID, openStackCluster)
	if err != nil {
		return fmt.Errorf("error getting OpenStack server: %w", err)
	}

| 231 | + |
| 232 | + serverJSON, err := json.MarshalIndent(srv, "", " ") |
| 233 | + if err != nil { |
| 234 | + return fmt.Errorf("error marshalling server %v: %s", srv, err) |
| 235 | + } |
| 236 | + if err := os.WriteFile(path.Join(outputPath, "server.txt"), serverJSON, 0o600); err != nil { |
| 237 | + return fmt.Errorf("error writing server JSON %s: %s", serverJSON, err) |
| 238 | + } |
| 239 | + |
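	// Commands are run on the machine over SSH via the cluster's bastion host,
	// so log collection is only possible when a bastion is available.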
	if openStackCluster.Status.Bastion == nil {
		Logf("Skipping log collection for machine %q since no bastion is available", m.Name)
	} else {
		srvUser := o.E2EContext.E2EConfig.GetVariable(SSHUserMachine)
		executeCommands(
			ctx,
			o.E2EContext.Settings.ArtifactFolder,
			o.E2EContext.Settings.Debug,
			outputPath,
			ip,
			openStackCluster.Status.Bastion.FloatingIP,
			srvUser,
			[]command{
				// Don't collect the full systemd journal for now, it just takes too long.
				// {
				//	title: "systemd",
				//	cmd:   "journalctl --no-pager --output=short-precise | grep -v 'audit:\\|audit\\['",
				// },
				{
					title: "kern",
					cmd:   "journalctl --no-pager --output=short-precise -k",
				},
				{
					title: "containerd-info",
					cmd:   "crictl --runtime-endpoint unix:///run/containerd/containerd.sock info",
				},
				{
					title: "containerd-containers",
					cmd:   "crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps",
				},
				{
					title: "containerd-pods",
					cmd:   "crictl --runtime-endpoint unix:///run/containerd/containerd.sock pods",
				},
				{
					title: "cloud-final",
					cmd:   "journalctl --no-pager -u cloud-final",
				},
				{
					title: "kubelet",
					cmd:   "journalctl --no-pager -u kubelet.service",
				},
				{
					title: "containerd",
					cmd:   "journalctl --no-pager -u containerd.service",
				},
			},
		)
	}
	return nil
}

// CollectMachinePoolLog is not yet implemented for the OpenStack provider.
func (o OpenStackLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error {
	return fmt.Errorf("not implemented")
}

// CollectInfrastructureLogs is not yet implemented for the OpenStack provider.
func (o OpenStackLogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1.Cluster, _ string) error {
	return fmt.Errorf("not implemented")
}