diff --git a/PrivateCloud.DiagnosticInfo/PrivateCloud.DiagnosticInfo.psm1 b/PrivateCloud.DiagnosticInfo/PrivateCloud.DiagnosticInfo.psm1 index 19d7765..ee3488f 100644 --- a/PrivateCloud.DiagnosticInfo/PrivateCloud.DiagnosticInfo.psm1 +++ b/PrivateCloud.DiagnosticInfo/PrivateCloud.DiagnosticInfo.psm1 @@ -1,6205 +1,6225 @@ -<################################################### - # # - # Copyright (c) Microsoft. All rights reserved. # - # # - ##################################################> - -$Module = 'PrivateCloud.DiagnosticInfo' - -<############################################################ -# Common helper functions/modules for main/child sessions # -############################################################> - -$CommonFuncBlock = { - - # FailoverClusters is Server-only. We allow the module to run (Show) on client. - # DcbQos is only present if the Data-Center-Bridging feature is present (for RoCE) - # Hyper-V may be ommitted in SOFS-only configurations - # - # Handling of import failures in Start-Job initialization blocks is special - we - # cannot control failure to load errors there, and need to use GM to check; neither - # -ErrorAction nor try/catch control propagation of the errors. - - if (Get-Module -ListAvailable FailoverClusters) { - Import-Module FailoverClusters - } - - if (Get-Module -ListAvailable DcbQos) { - Import-Module DcbQos - } - - if (Get-Module -ListAvailable Hyper-V) { - Import-Module Hyper-V - } - - Import-Module CimCmdlets - Import-Module NetAdapter - Import-Module NetQos - Import-Module SmbShare - Import-Module SmbWitness - Import-Module Storage - - # - # Shows error, cancels script - # - function Show-Error( - [string] $Message, - [System.Management.Automation.ErrorRecord] $e = $null - ) - { - $Message = "$(get-date -format 's') : $Message - cmdlet was cancelled" - if ($e) { - Write-Error $Message - throw $e - } else { - Write-Error $Message -ErrorAction Stop - } - } - - # - # Shows warning, script continues - # - function Show-Warning( - [string] $Message - ) - { - Write-Warning "$(get-date -format 's') : $Message" - } - - # - # Show arbitrary normal status message, with optional color coding - # - function Show-Update( - [string] $Message, - [System.ConsoleColor] $ForegroundColor = [System.ConsoleColor]::White - ) - { - Write-Host -ForegroundColor $ForegroundColor "$(get-date -format 's') : $Message" - } - - # Wrapper for Import-Clixml which provides for basic feedback on missing/not-gathered elements - function Import-ClixmlIf( - [string] $Path, - [string] $MessageIf = $null - ) - { - if (Test-Path $path) { - Import-Clixml $path - } else { - $m = "$Path not present" - if ($MessageIf) { - $m = "$MessageIf : " + $m - } - Show-Warning $m - $null - } - } - - function Show-JobRuntime( - [object[]] $jobs, - [hashtable] $namehash, - [switch] $IncludeDone = $true, - [switch] $IncludeRunning = $true - ) - { - # accumulate status lines as we go - $job_running = @() - $job_done = @() - - $jobs | sort Name,Location |% { - - $this = $_ - - # crack parents to children - # map children to names through the input namehash - switch ($_.GetType().Name) { - - 'PSRemotingJob' { - $jobname = $this.Name - $j = $this.ChildJobs | sort Location - } - - 'PSRemotingChildJob' { - if ($namehash.ContainsKey($this.Id)) { - $jobname = $namehash[$this.Id] - } else { - $jobname = "" - } - $j = $this - } - - default { throw "unexpected job type $_" } - } - - if ($IncludeDone) { - $j |? 
State -ne Running |% { - $job_done += "$($_.State): $($jobname) [$($_.Name) $($_.Location)]: $(($_.PSEndTime - $_.PSBeginTime).ToString("m'm's\.f's'")) : Start $($_.PSBeginTime.ToString('s')) - Stop $($_.PSEndTime.ToString('s'))" - } - } - - if ($IncludeRunning) { - $t = get-date - $j |? State -eq Running |% { - $job_running += "Running: $($jobname) [$($_.Name) $($_.Location)]: $(($t - $_.PSBeginTime).ToString("m'm's\.f's'")) : Start $($_.PSBeginTime.ToString('s'))" - } - } - } - - if ($job_running.Count) { - $job_running |% { Show-Update $_ } - } - - if ($job_done.Count) { - $job_done |% { Show-Update $_ } - } - } - - function Show-WaitChildJob( - [object[]] $jobs, - [int] $tick = 5 - ) - { - # remember parent job names of all children for output - # ids are session global, monotonically increasing integers - $jhash = @{} - $jobs |% { - $j = $_ - $j.ChildJobs |% { - $jhash[$_.Id] = $j.Name - } - } - - $tout_c = $tick - $ttick = get-date - - # set up trackers. Note that jwait will slice to all child jobs on all input jobs. - $jdone = @() - $jwait = $jobs.ChildJobs - $jtimeout = $false - - do { - - $jdone_c = $jwait | wait-job -any -timeout $tout_c - $td = (get-date) - $ttick - - if ($jdone_c) { - - # write-host -ForegroundColor Red "done" - Show-JobRuntime $jdone_c $jhash - $tout_c = [int] ($tick - $td.TotalSeconds) - if ($tout_c -lt 1) { $tout_c = 1 } - # write-host -ForegroundColor Yellow "waiting additional $tout_c s (tout $tout and so-far $($td.TotalSeconds))" - - $jdone += $jdone_c - $jwait = $jwait |? { $_ -notin $jdone_c } - - } else { - - $jtimeout = $true - - # write-host -ForegroundColor Yellow "timeout tick" - write-host ("-"*20) - $ttick = get-date - $tout_c = $tick - - # exclude jobs which may be racing to done, we'll get them in the next tick - Show-JobRuntime $jwait $jhash -IncludeDone:$false - } - - } while ($jwait) - - # consume parent waits, which should be complete (all children complete) - $null = Wait-Job $jobs - - # only do a total summary if we hit a timeout and did a running summary - - if ($jtimeout) { - write-host "Job Summary" -ForegroundColor Green - Show-JobRuntime $jobs - } - } - - # - # Convert an absolute local path to the equivalent remote path via SMB admin shares - # ex: c:\foo\bar & scratch -> \\scratch\C$\foo\bar - # - - function Get-AdminSharePathFromLocal( - [string] $node, - [string] $local - ) - { - "\\"+$node+"\"+$local[0]+"$\"+$local.Substring(3,$local.Length-3) - } - - # - # Common function to construct path to per-node data directory - # - - function Get-NodePath( - [string] $Path, - [string] $node - ) - { - Join-Path $Path "Node_$node" - } - - # - # Count number of elements in an array, including checks for $null or single object - # - function NCount { - Param ([object] $Item) - if ($null -eq $Item) { - $Result = 0 - } else { - if ($Item.GetType().BaseType.Name -eq "Array") { - $Result = ($Item).Count - } else { - $Result = 1 - } - } - return $Result - } - - function Get-SddcCapturedEvents ( - [string] $Path, - [int] $Hours - ) - { - # Calculate number of milliseconds and prepare the WEvtUtil parameter to filter based on date/time - $QTime = $null - $ExcludeBigEvtx = $true - - if ($Hours -ne -1) { - $MSecs = $Hours * 60 * 60 * 1000 - $QTime = "*[System[TimeCreated[timediff(@SystemTime) <= "+$MSecs+"]]]" - } - - # Exclude verbose/lower value channels - # The FailoverClustering Diagnostics are reflected in the cluster logs, already gathered (and large) - # StorageSpaces Performance is very expensive to export and not usually needed - 
$LogToExclude = 'Microsoft-Windows-FailoverClustering/Diagnostic', - 'Microsoft-Windows-FailoverClustering/DiagnosticVerbose', - 'Microsoft-Windows-FailoverClustering-Client/Diagnostic', - 'Microsoft-Windows-StorageReplica/Performance', - 'Microsoft-Windows-StorageSpaces-Driver/Performance' - - $GetWinEventDumpFilePath = Join-Path $Path "getwinevent.json" - $GetWinEventDumpXmlPath = Join-Path $Path "getwinevent.xml" - - Get-WinEvent -ListLog * -Force > $GetWinEventDumpFilePath - Get-WinEvent -ListLog * -Force | Export-Clixml $GetWinEventDumpXmlPath - - Write-Output $GetWinEventDumpFilePath - Write-Output $GetWinEventDumpXmlPath - - if ($ExcludeBigEvtx) { - $LogToExclude += 'Security', - 'Microsoft-Windows-SystemDataArchiver/Diagnostic', - 'Microsoft-Windows-Health/Diagnostic' - } - - Get-WinEvent -ListLog * -Force -ErrorAction Ignore -WarningAction Ignore |? { $LogToExclude -notcontains $_.LogName } |% { - - $EventFile = Join-Path $Path ($_.LogName.Replace("/","-")+".EVTX") - - # analytical/debug channels can not be captured live - # if any are encountered (not normal), disable them temporarily for export - $directChannel = $false - if ($_.LogType -in @('Analytical','Debug') -and $_.IsEnabled) { - $directChannel = $true - wevtutil sl /e:false $_.LogName - } - - # Export log file using, filtered to given history limit if specified - if ($QTime) { - wevtutil epl $_.LogName $EventFile /q:$QTime /ow:true - } else { - wevtutil epl $_.LogName $EventFile /ow:true - } - - if ($directChannel -eq $true) { - echo y | wevtutil sl /e:true $_.LogName | out-null - } - - if ($directChannel -eq $true -or - $_.RecordCount -ne 0) { - - # Create locale metadata for off-system rendering - wevtutil al $EventFile /l:$PSCulture - - # Emit filename for capture - Write-Output $EventFile - } - } - - # work around temp file leak re: archive-log/wevtsvc - # conservatively estimate that any file older than - # after sleeping a few seconds must be stale, so we do not stomp other - # tools using the same functionality. - $t = Get-Date - sleep 5 - dir $env:WINDIR\ServiceProfiles\LocalService\AppData\Local\Temp |? { - ($_.Name -like 'MSG*.tmp' -or - $_.Name -like 'EVT*.tmp' -or - $_.Name -like 'PUB*.tmp') -and - $_.CreateTime -lt $t - } | del -Force -ErrorAction SilentlyContinue - } - - # wrapper for common date format for file naming - function Format-SddcDateTime( - [datetime] $d - ) - { - $d.ToString('yyyyMMdd-HHmm') - } - - # helper for testing/emitting feedback on module presence - # use this on icm to remote nodes - # this will be obsolete if/when we can integrate with add-node - function Test-SddcModulePresence - { - # note that we can't pull from the global - $Module = 'PrivateCloud.DiagnosticInfo' - $m = Get-Module $Module - - if (-not $m) { - Write-Warning "Node $($env:COMPUTERNAME) does not have the $Module module installed for Sddc Diagnostic Archive. Please 'Install-SddcDiagnosticModule -Node $($env:COMPUTERNAME)' to address." - $false - } else { - $true - } - } - - # function for constructing filter xpath queries for event channels - # event: list of event ids - # timebase: time base for timedelta query (default: current system time) - # timedeltams: time in ms relatve to base to filter event timestamps to (older than base) - # data: table of k=v the event(s) must match - # - # events are OR'd - # time is AND'd - # dataand is AND'd - # dataor is dataand AND dataor OR'd (e.g. 
(a and b and c) and (d or e or f) - function Get-FilterXpath - { - [CmdletBinding(PositionalBinding=$false)] - param ( - [int[]] $Event = @(), - - [datetime] $TimeBase, - - [ValidateScript({$_ -gt 0})] - [int] $TimeDeltaMs = -1, - - [hashtable] $DataAnd = @{}, - [hashtable] $DataOr = @{} - ) - - # first build out the system property clauses - $systemclauses = @() - - # build the event id clause - if ($Event.Count) { - $c = $Event |% { "EventID = $_" } - # bracket eventids iff there are multiple - if ($Event.Count -gt 1) { - $systemclauses += "(" + ($c -join " or ") + ")" - } else { - $systemclauses += $c - } - } - - # build the time delta clause - # based - two argument, relative to $TimeBase - # unbase - one argument, relative to instantaneous system time - if ($TimeDeltaMs -gt 0) { - if ($TimeBase) { - $t = $TimeBase.ToUniversalTime().ToString('s') - $systemclauses += "(TimeCreated[timediff(@SystemTime,'$($t)') <= $($TimeDeltaMs)])" - } else { - $systemclauses += "(TimeCreated[timediff(@SystemTime) <= $($TimeDeltaMs)])" - } - } - - # system property clauses and together - $systemclause = "System[" + ($systemclauses -join " and ") + "]" - - # now build data clauses - $cAnd = @($DataAnd.Keys | sort |% { - "Data[@Name = '" + $_ + "'] " + $DataAnd[$_] - }) -join " and " - - $cOr = @($DataOr.Keys | sort |% { - "Data[@Name = '" + $_ + "'] " + $DataOr[$_] - }) -join " or " - - # both and'd, one or the other, or neither - if ($cAnd.Length -and $cOr.Length) { - $dataclause = "EventData[$cAnd and ($cOr)]" - } elseif ($cAnd.Length -or $cOr.Length) { - $dataclause = "EventData[$($cAnd + $cOr)]" - } else { - $dataclause = $null - } - - # and join system+data with and - if ($dataclause) { - $xpath = "*[$systemclause and $dataclause]" - } else { - $xpath = "*[$systemclause]" - } - - $xpath - } - - # Makes a custom object out of an input stream of colon seperated k/v data - # ex: - # creationTime: 2018-06-25T20:07:47.030Z - # lastAccessTime: 2018-06-25T20:07:47.030Z - # lastWriteTime: 2018-06-25T19:53:58.000Z - # fileSize: 1118208 - # attributes: 32 - # numberOfLogRecords: 523 - # oldestRecordNumber: 1 - # - # -> object with named members. Any member name ending in Time is pre-cast to [datetime]. 
- function Parse-SemicolonKVData - { - BEGIN { $o = new-object psobject } - PROCESS { - # split at the first semi - $null = $_ -match '^([^:]+)\s*:\s*(.*)$' - $k = $matches[1] - $v = $matches[2] - if ($k -like '*time') { - $o | Add-Member -NotePropertyName $matches[1] -NotePropertyValue ([datetime]$matches[2]) - } else { - $o | Add-Member -NotePropertyName $matches[1] -NotePropertyValue $matches[2] - } - - } - END { $o } - } - - # count the number of events in a given event log which match a given xpath query - # - # powershell's event pipeline is very slow for bulk counting when there may be many - # events (or simply many event channels that our caller needs to sift through) - # use wevtutil queries to a temporary file and then count it using its gli facility - function Count-EventLog - { - param( - [string] $path, - [string] $xpath - ) - - $f = New-TemporaryFile - - try { - - wevtutil epl /lf:true $path $f /q:$xpath /ow:true - $gli = wevtutil gli /lf:true $f | Parse-SemicolonKVData - $gli.numberOfLogRecords - - } finally { - del -Force $f - } - } - - function Get-EventDataHash - { - - param( - [System.Diagnostics.Eventing.Reader.EventLogRecord] $event - ) - - # must cast through the XML representation of the event to get named properties - # insert text into hash and return - $xh = @{} - $x = ([xml]$event.ToXml()).Event.EventData.Data - $x |% { - $xh[$_.Name] = $_.'#text' - } - $xh - } -} - -# do a basic compression on the common function block, removing -# comment-only lines -# whitespace-only lines -# leading whitespace -# this is observed to reduce it by about 50% in character count -# the string transform of the block is used in session passing (job -InitializationScript) and -# cannot exceed about 14KiB in size -$CommonFunc = [scriptblock]::Create($( - ((([string]$CommonFuncBlock) -split "`n") |? { $_ -notmatch '^\s*#' } |? { $_ -notmatch '^\s*$' }) -replace '^\s+','' -join "`n" -)) - -# evaluate into the main session -# without a direct assist like start-job -initialization script, passing into -# other contexts converts to string, which we must undo with [scriptblock]::Create() -. $CommonFunc - -<#################################################### -# Common helper functions for main session only # -####################################################> - -# -# This tests whether a path is a valid prefix name for a new file (e.g., $path + .ZIP) -# A ref is provided so it can be rewritten to an absolute path if specified in drive or -# directory relative forms - .NET callouts have cwd = WINDIR, which confuses things v. -# normal expectations -# - -function Test-PrefixFilePath( - [ref] $path - ) -{ - $p = $path.Value - $elements = @($p -split '\\') - - # we need to tear off the last element and test the parent. before doing that, - # need to check that we have enough path to use in the first place. 
- # also check to see if we have simple cases of a usable single name - - # the last split element cannot be empty (e.g., bad: some\path\, good: some\path) - # a single element is OK as long as it isn't a driveletter (e.g., bad: C:, good: foo) - # unc is OK as long as we see at least 5 post-split elements (e.g., bad: \\foo, \\foo\bar, good: \\foo\bar\baz) - - $lastempty = $elements[-1] -notmatch '\S' - $islocabs = $elements[0].Length -and $elements[0][1] -eq ':' - $isunc = $p -like '\\*' - - if ($lastempty -or - ($islocabs -and $elements.Count -eq 1) -or - ($isunc -and $elements.Count -lt 5)) { - return $false - } - - # if not local absolute or unc, it is local relative - # force this to local absolute by - # 1. prepending the : component of the cwd if it is drive-relative absolute, starting with \ - # 2. prepending the entire cwd otherwise - # - # in these cases we must return the updated path to the caller - - if (-not ($islocabs -or $isunc)) { - - # local drive relative (test needed) - if ($p[0] -eq '\') { - - # prepend the drive letter - $p = Join-Path ((Get-Location).Path.SubString(0,2)) $p - - # drive relative single element (no test needed, return immediately) (e.g., \foo, NOT \foo\bar) - if ($elements.Count -eq 2) { - - $path.Value = $p - return $true - } - - # ... must be multi-element (test needed) - # resplit for the prefix test (e.g., got foo\bar, must set up so we test c:\the\cwd\foo) - $elements = @($p -split '\\') - - } else { - - # prepend cwd - $p = Join-Path (Get-Location).Path $p - - # local single element (no test needed, return immediately) - if ($elements.Count -eq 1) { - - $path.Value = $p - return $true - } - - # ... must be local relative multi-element (test needed) - # resplit ... - $elements = @($path.Value -split '\\') - } - } - - # rejoin without the tail and test - $tp = $elements[0..($elements.Count-2)] -join '\' - - # return potentially updated path, but only modify on success - if (Test-Path $tp) { - $path.Value = $p - $true - } else { - $false - } -} - -function Check-ExtractZip( - [string] $Path - ) -{ - if ($Path.ToUpper().EndsWith(".ZIP")) { - - $ExtractToPath = $Path.Substring(0, $Path.Length - 4) - - # Already done? - $f = gi $ExtractToPath -ErrorAction SilentlyContinue - if ($f) { - return $f.FullName - } - - Show-Update "Extracting $Path -> $ExtractToPath" - - try - { - Add-Type -Assembly System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::ExtractToDirectory($Path, $ExtractToPath) - } - catch - { - Show-Error("Can't extract results as Zip file from '$Path' to '$ExtractToPath'") - } - - return $ExtractToPath - } - - return $Path -} - -# -# Utility wrapper for copyout jobs which allows seperating ones which -# delete temporary/gathered content and ones which gather persistent -# content (like archive logs) -# - -function Start-CopyJob( - [string] $Path, - [switch] $Delete, - [object[]] $j - ) -{ - $j |% { - - $parent = $_ - $parent.ChildJobs |% { - - $logs = Receive-Job $_ - - # avoid the job if not needed, no content - if (@($logs).Count) { - - # create/use a specific job destination if present - # ex: "foo" -> \node_xxx\foo\ v. 
the default \node_xxx\ - $Destination = (Get-NodePath $Path $_.Location) - if (Get-Member -InputObject $_ -Name Destination) { - $Destination = Join-Path $Destination $_.Destination - if (-not (Test-Path $Destination)) { - $null = md $Destination -Force -ErrorAction Continue - } - } - - start-job -Name "Copy $($parent.Name) $($_.Location)" -ArgumentList $logs,$Destination,$Delete { - - param($logs,$Destination,$Delete) - - $logs |% { - # allow errors to propagte for triage - Copy-Item -Recurse $_ $Destination -Force -ErrorAction Continue - if ($Delete) { - Remove-Item -Recurse $_ -Force -ErrorAction Continue - } - } - } - } - } - } -} - -# -# Utility wrapper for invoking commands by opening sessions -# for each of the cluster nodes and preserving the session -# to be deleted after use. -# - -function Invoke-SddcCommonCommand ( - [string[]] $ClusterNodes = @(), - [string] $JobName, - [scriptblock] $InitBlock, - [scriptblock] $ScriptBlock, - # If session configuration name is $null, it connect to default powershell - [string] $SessionConfigurationName, - [Object[]] $ArgumentList - ) -{ - $Job = @() - $Sessions = @() - $SessionIds = @() - - if ($ClusterNodes.Count -eq 0) - { - $Sessions = New-PSSession -Cn localhost -EnableNetworkAccess -ConfigurationName $SessionConfigurationName - } - else - { - $Sessions = New-PSSession -ComputerName $ClusterNodes -ConfigurationName $SessionConfigurationName - } - - Invoke-Command -Session $Sessions $InitBlock - $Job = Invoke-Command -Session $Sessions -AsJob -JobName $JobName -ScriptBlock $ScriptBlock -ArgumentList $ArgumentList - $SessionIds = $Sessions.Id - - $Job | Add-Member -NotePropertyName ActiveSessions -NotePropertyValue $SessionIds - - return $Job -} - -# -# Goes over a list of nodes and finds the first one that can access the cluster -# if none of them can access the cluster it returns nothing -# - -function Get-ClusterAccessNode( - $Nodes -) -{ - for ($i = 0; $i -lt $Nodes.count; $i++) - { - $Cluster = Get-Cluster $Nodes[$i].Name -ErrorAction SilentlyContinue - if ($Cluster -ne $null) - { - return $Nodes[$i].Name - } - } -} - -# -# Makes a list of cluster nodes or equivalent property-containing objects (Name/State) -# Optionally filtered for if they are physically responding v. cluster visible state. 
-# - -function Get-NodeList( - [string] $Cluster, - [string[]] $Nodes = @(), - [switch] $Filter -) -{ - $NodesToPing = @() - $SuccesfullyPingedNodes = @() - $NodesToReturn = @() - - if ($Nodes.Count) { - $NodesToPing += $Nodes |% { New-Object -TypeName PSObject -Property @{ "Name" = $_; "State" = "Down"; "Type" = "ManuallySpecifiedMachine" }} - } - - - # Now try to contact the cluster - first via name then by every name from $Nodes.Count above if that fails, until we succesfully contact cluster - # Add any nodes missing from $NodesToPing / Replace objects in the list with their real objects - $ClusterNodes = $null - - if ($Cluster -ne "" -and $Cluster -ne $null) - { - $ClusterNodes = Get-ClusterNode -Cluster $Cluster -ErrorAction SilentlyContinue - } - - $NodeIdx = 0; - while ($ClusterNodes -eq $null -and $NodeIdx -lt $NodesToPing.Count) - { - # we failed to get it, iterate through the nodes - $ClusterNodes = Get-ClusterNode -Cluster $NodesToPing[$NodeIdx].Name -ErrorAction SilentlyContinue - - $NodeIdx++ - } - - if ($ClusterNodes -ne $null) - { - if ($Nodes.Count) - { - # Replace their objects if found, add to list otherwise - for ($i = 0; $i -lt $ClusterNodes.Count; $i++) - { - $found = $false - - for ($j = 0; $j -lt $NodesToPing.Count; $j++) - { - if ($NodesToPing[$j].Name -eq $ClusterNodes[$i].Name) - { - $NodesToPing[$j] = $ClusterNodes[$i] - $found = $true - break - } - } - - if ($found -ne $true) - { - $NodesToPing += @($ClusterNodes[$i]) - } - } - } - else - { - $NodesToPing = @($ClusterNodes) - } - } - - - # Try to ping the nodes - - if ($NodesToPing.Count) { - - $PingResults = @() - # Test-NetConnection is ~3s. Parallelize for the sake of larger clusters/lists of nodes. - $j = $NodesToPing |% { - - Start-Job -ArgumentList $_ { - param( $Node ) - if (Test-Connection -ComputerName $Node.Name -Quiet) { - $Node - } - } - } - - $null = Wait-Job $j - $PingResults += $j | Receive-Job - $j | Remove-Job - - # For any notes that are "fake objects" (Type=Machine instead of Type=Node) mark them up - # Copy from NodesToPing to SuccesfullyPingedNodes if they're contained in PingResults - # Doing this because the job mutates the object - for ($i = 0; $i -lt $PingResults.Count; $i++) - { - for ($j = 0; $j -lt $NodesToPing.Count; $j++) - { - if ($NodesToPing[$j].Name -eq $PingResults[$i].Name) - { - $SuccesfullyPingedNodes += @($NodesToPing[$j]) - } - } - } - - } - - if ($Filter) { - $NodesToReturn = $SuccesfullyPingedNodes - } else { - # unfiltered, return all - $NodesToReturn = $NodesToPing - } - - - return $NodesToReturn -} - -<################################################## -# End Helper functions # -##################################################> - -<# -.SYNOPSIS - Get state and diagnostic information for all Software-Defined DataCenter (SDDC) features in a Windows Server 2016 cluster. - -.DESCRIPTION - Get state and diagnostic information for all Software-Defined DataCenter (SDDC) features in a Windows Server 2016 cluster. - Run from one of the nodes of the cluster or specify a cluster name, or specify a set of nodes directly. Results are saved - to a ZIP archive for later review and analysis. - -.LINK - To provide feedback and contribute visit https://github.com/PowerShell/PrivateCloud.Health - -.EXAMPLE - Get-SddcDiagnosticInfo - - Targets the cluster the local computer is a member of. 
- Uses the default temporary working folder at $env:USERPROFILE\HealthTest - Saves the zipped results at $env:USERPROFILE\HealthTest--.ZIP - -.EXAMPLE - Get-SddcDiagnosticInfo -WriteToPath C:\Test - - Uses the specified folder as the temporary working folder. This does not change the location of - the zipped results. - -.EXAMPLE - Get-SddcDiagnosticInfo -ClusterName Cluster1 - - Targets the specified cluster, Cluster1. - -.EXAMPLE - Get-SddcDiagnosticInfo -ReadFromPath C:\Test.ZIP - - Display the summary health report from the capture located in the given ZIP. The content is - unzipped to a directory (minus the .ZIP extension) and remains after the summary health report - is shown. - - In this example, C:\Test would be created from C:\Test.ZIP. If the .ZIP path is specified and - the unzipped directory is present, the directory will be reused without re-unzipping the - content. - - EQUIVALENT: Show-SddcDiagnosticReport -Report Summary -Path - - The file 0_CloudHealthSummary.log in the capture contains the summary report at the time the - capture was taken. Running the report again is a re-analysis of the content, which may reflect - new triage if PrivateCloud.DiagnosticInfo has been updated in the interim. - -.EXAMPLE - Get-SddcDiagnosticInfo -ReadFromPath C:\Test - - Display the summary health report from the capture located in the given directory, which should - be an unzipped capture. - -.PARAMETER ReadFromPath -Path to read content from for summary health report generation. - -.PARAMETER TemporaryPath -Temporary path to stage capture content to, prior to ZIP creation. - -.PARAMETER ClusterName -Cluster to capture content from. - -.PARAMETER Nodelist -List of nodes to capture content from. - -.PARAMETER HoursOfEvents -For sources which support it, limit log and event data to the prior number of hours. By default -all available data is captured (-1). - -.PARAMETER DaysOfArchive -Limit the number of days of Sddc Diagnostic Archive captured. Only applicable if Sddc Diagnostic -Archive is active in the target cluster. By default 8 days are captured. - -Specify -1 to capture the complete archive - NOTE: this may be very large. -Specify 0 to disable capture of the archive. - -.PARAMETER ZipPrefix -Path for the resulting ZIP file: --.ZIP will be appended. - -.PARAMETER MonitoringMode -Run in a limited monitoring mode (deprecated) - -.PARAMETER ExpectedNodes -Specify the expected number of nodes. A summary warning will be issued if a different number is -present. - -.PARAMETER ExpectedNetworks -Specify the expected number of networks. A summary warning will be issued if a different number is -present. - -.PARAMETER ExpectedVolumes -Specify the expected number of volumes. A summary warning will be issued if a different number is -present. - -.PARAMETER ExpectedDedupVolumes -Specify the expected number of dedeuplicated volumes. A summary warning will be issued if a -different number is present. - -.PARAMETER ExpectedPhysicalDisks -Specify the expected number of physical disks. A summary warning will be issued if a different -number is present. - -.PARAMETER ExpectedPools -Specify the expected number of storage pools. A summary warning will be issued if a different -number is present. - -.PARAMETER ExpectedEnclosures -Specify the expected number of storage enclosures. A summary warning will be issued if a different -number is present. 
- -.PARAMETER ProcessCounter -Process the performance counters into a summary report (deprecated) - -.PARAMETER PerfSamples -Specify the number of performance counter samples to capture (in seconds, 1/s). - -.PARAMETER IncludeAssociations -Include additional object association information (deprecated) - -.PARAMETER IncludeDumps -Include minidumps and live kernel report dumps. - -.PARAMETER IncludeGetNetView -Include content from the Get-NetView (NetDiagnosticInfo module) command, if present. - -.PARAMETER IncludeHealthReport -Include an additional health report (deprecated) - -.PARAMETER IncludeClusterPerformanceHistory -Include an Cluster Performance History report - -.PARAMETER PerformanceHistoryTimeFrame -If Cluster Performance History is collected what is the time frame of collection. - -.PARAMETER IncludeLiveDump -Include a live dump of the target systems - -.PARAMETER IncludeStorDiag -Include a storage diagnostic log of the target systems - -.PARAMETER IncludeProcessDump -Include the process dump of the default process and processlists, if present. - -.PARAMETER ProcessLists -Include the process dump for these processlists (comma seperated), if present. - -.PARAMETER IncludePerformance -Include a performance counter capture. - -.PARAMETER IncludeReliabilityCounters -Include Storage Reliability counters. This may incur a short but observable latency cost on the -physical disks due to varying overhead in their internal handling of SMART queries. - -.PARAMETER SessionConfigurationName -SessionConfigurationName to connect to other nodes in cluster. -Null if default configuration is to be used. - -#> - -function Get-SddcDiagnosticInfo -{ - # aliases usage in this module is idiomatic, only using defaults - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - - # - # Parameter sets: - # Read - backcompat alias for Show Summary report - # Write(C|N) - capture with -Cluster or -Nodelist - # M - monitoring mode - # - - [CmdletBinding(DefaultParameterSetName="WriteC")] - [OutputType([String])] - - param( - [parameter(ParameterSetName="WriteC", Position=0, Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Position=0, Mandatory=$false)] - [alias("WriteToPath")] - [ValidateNotNullOrEmpty()] - [string] $TemporaryPath = $($env:userprofile + "\HealthTest\"), - - [parameter(ParameterSetName="M", Position=1, Mandatory=$false)] - [parameter(ParameterSetName="WriteC", Position=1, Mandatory=$false)] - [string] $ClusterName = ".", - - [parameter(ParameterSetName="WriteN", Position=1, Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string[]] $Nodelist = @(), - - [parameter(ParameterSetName="Read", Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] $ReadFromPath = "", - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [bool] $IncludePerformance = $true, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,3600)] - [int] $PerfSamples = 30, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $ProcessCounter, - - [parameter(ParameterSetName="M", Mandatory=$true)] - [switch] $MonitoringMode, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [int] $HoursOfEvents = -1, - - 
[parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(-1,365)] - [int] $DaysOfArchive = 8, - - [parameter(ParameterSetName="WriteC", Position=2, Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Position=2, Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $ZipPrefix = $($env:userprofile + "\HealthTest"), - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,1000)] - [int] $ExpectedNodes, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,1000)] - [int] $ExpectedNetworks, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(0,1000)] - [int] $ExpectedVolumes, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(0,1000)] - [int] $ExpectedDedupVolumes, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,10000)] - [int] $ExpectedPhysicalDisks, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,1000)] - [int] $ExpectedPools, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateRange(1,10000)] - [int] $ExpectedEnclosures, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeAssociations, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeDumps, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeGetNetView, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $SkipVM, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeHealthReport, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeClusterPerformanceHistory, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [ValidateSet('LastHour','LastDay','LastWeek','LastMonth','LastYear')] - [string] $PerformanceHistoryTimeFrame = "LastDay", - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeLiveDump, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeStorDiag, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeProcessDump, - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [string] $Processlists = "", - - [parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [switch] $IncludeReliabilityCounters, - - 
[parameter(ParameterSetName="WriteC", Mandatory=$false)] - [parameter(ParameterSetName="WriteN", Mandatory=$false)] - [string] $SessionConfigurationName = $null - ) - - # - # Set strict mode to check typos on variable and property names - # - - Set-StrictMode -Version Latest - - function VolumeToPath { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { $Result = $_.CSVPath } - } - return $Result - } - - function VolumeToCSV { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { $Result = $_.CSVVolume } - } - return $Result - } - - function VolumeToVD { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { $Result = $_.FriendlyName } - } - return $Result - } - - function VolumeToShare { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { $Result = $_.ShareName } - } - return $Result - } - - function VolumeToResiliency { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { - $Result = $_.VDResiliency+","+$_.VDCopies - if ($_.VDEAware) { - $Result += ",E" - } else { - $Result += ",NE" - } - } - } - return $Result - } - - function VolumeToColumns { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeID -eq $Volume) { $Result = $_.VDColumns } - } - return $Result - } - - function CSVToShare { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.CSVVolume -eq $Volume) { $Result = $_.ShareName } - } - return $Result - } - - function VolumeToPool { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.PoolName } - } - return $Result - } - - function CSVToVD { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.CSVVolume -eq $Volume) { $Result = $_.FriendlyName } - } - return $Result - } - - function CSVToPool { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.CSVVolume -eq $Volume) { $Result = $_.PoolName } - } - return $Result - } - - function CSVToNode { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.CSVVolume -eq $Volume) { $Result = $_.CSVNode } - } - return $Result - } - - function VolumeToCSVName { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.CSVName } - } - return $Result - } - - function CSVStatus { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations 
present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.CSVStatus.Value } - } - return $Result - } - - function PoolOperationalStatus { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.PoolOpStatus } - } - return $Result - } - - function PoolHealthStatus { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.PoolHealthStatus } - } - return $Result - } - - function PoolHealthyPDs { - Param ([String] $PoolName) - $healthyPDs = "" - if ($PoolName) { - $totalPDs = (Get-StoragePool -FriendlyName $PoolName -CimSession $ClusterName -ErrorAction SilentlyContinue | Get-PhysicalDisk).Count - $healthyPDs = (Get-StoragePool -FriendlyName $PoolName -CimSession $ClusterName -ErrorAction SilentlyContinue | Get-PhysicalDisk |? HealthStatus -eq "Healthy" ).Count - } - else { - Show-Error("No storage pool specified") - } - return "$totalPDs/$healthyPDs" - } - - function VDOperationalStatus { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.OperationalStatus } - } - return $Result - } - - function VDHealthStatus { - Param ([String] $Volume) - if ($null -eq $Associations) { Show-Error("No device associations present.") } - $Result = "" - $Associations |% { - if ($_.VolumeId -eq $Volume) { $Result = $_.HealthStatus } - } - return $Result - } - - # - # Veriyfing basic prerequisites on script node. - # - - $OS = Get-CimInstance -ClassName Win32_OperatingSystem - $S2DEnabled = $false - - if ([uint64]$OS.BuildNumber -lt 14393) { - Show-Error("Wrong OS Version - Need at least Windows Server 2016. You are running - $($OS.Name) BuildNumber $($OS.BuildNumber)") - } - - if (-not (Get-Command -Module FailoverClusters)) { - Show-Error("Cluster PowerShell not available. Download the Windows Failover Clustering RSAT tools.") - } - - function StartMonitoring { - Show-Update "Entered continuous monitoring mode. Storage Infrastucture information will be refreshed every 3-6 minutes" -ForegroundColor Yellow - Show-Update "Press Ctrl + C to stop monitoring" -ForegroundColor Yellow - - try { $ClusterName = (Get-Cluster -Name $ClusterName).Name } - catch { Show-Error("Cluster could not be contacted. `nError="+$_.Exception.Message) } - - $NodeList = Get-NodeList -Cluster $ClusterName -Filter - - $AccessNode = Get-ClusterAccessNode @($NodeList) - - if ($AccessNode -ne $null) - { - $AccessNode = $AccessNode + "." + (Get-Cluster -Name $AccessNode).Domain - } - - try { $Volumes = Get-Volume -CimSession $AccessNode } - catch { Show-Error("Unable to get Volumes. 
`nError="+$_.Exception.Message) } - - $AssocJob = Start-Job -ArgumentList $AccessNode,$ClusterName { - - param($AccessNode,$ClusterName) - - $SmbShares = Get-SmbShare -CimSession $AccessNode - $Associations = Get-VirtualDisk -CimSession $AccessNode |% { - - $o = $_ | Select-Object FriendlyName, CSVName, CSVNode, CSVPath, CSVVolume, - ShareName, SharePath, VolumeID, PoolName, VDResiliency, VDCopies, VDColumns, VDEAware - - $AssocCSV = $_ | Get-ClusterSharedVolume -Cluster $ClusterName - - if ($AssocCSV) { - $o.CSVName = $AssocCSV.Name - $o.CSVNode = $AssocCSV.OwnerNode.Name - $o.CSVPath = $AssocCSV.SharedVolumeInfo.FriendlyVolumeName - if ($o.CSVPath.Length -ne 0) { - $o.CSVVolume = $o.CSVPath.Split("\")[2] - } - $AssocLike = $o.CSVPath+"\*" - $AssocShares = $SmbShares |? Path -like $AssocLike - $AssocShare = $AssocShares | Select-Object -First 1 - if ($AssocShare) { - $o.ShareName = $AssocShare.Name - $o.SharePath = $AssocShare.Path - $o.VolumeID = $AssocShare.Volume - if ($AssocShares.Count -gt 1) { $o.ShareName += "*" } - } - } - - Write-Output $o - } - - $AssocPool = Get-StoragePool -CimSession $AccessNode -ErrorAction SilentlyContinue - $AssocPool |% { - $AssocPName = $_.FriendlyName - Get-StoragePool -CimSession $AccessNode -FriendlyName $AssocPName | - Get-VirtualDisk -CimSession $AccessNode |% { - $AssocVD = $_ - $Associations |% { - if ($_.FriendlyName -eq $AssocVD.FriendlyName) { - $_.PoolName = $AssocPName - $_.VDResiliency = $AssocVD.ResiliencySettingName - $_.VDCopies = $AssocVD.NumberofDataCopies - $_.VDColumns = $AssocVD.NumberofColumns - $_.VDEAware = $AssocVD.IsEnclosureAware - } - } - } - } - - Write-Output $Associations - } - - $Associations = $AssocJob | Wait-Job | Receive-Job - $AssocJob | Remove-Job - - [System.Console]::Clear() - - $Volumes |? 
FileSystem -eq CSVFS | Sort-Object SizeRemaining | - Format-Table -AutoSize @{Expression={$poolName = VolumeToPool($_.Path); "[$(PoolOperationalStatus($_.Path))/$(PoolHealthStatus($_.Path))] " + $poolName};Label="[OpStatus/Health] Pool"}, - @{Expression={(PoolHealthyPDs(VolumeToPool($_.Path)))};Label="HealthyPhysicalDisks"; Align="Center"}, - @{Expression={$vd = VolumeToVD($_.Path); "[$(VDOperationalStatus($_.Path))/$(VDHealthStatus($_.Path))] "+$vd};Label="[OpStatus/Health] VirtualDisk"}, - @{Expression={$csvVolume = VolumeToCSV($_.Path); "[" + $_.HealthStatus + "] " + $csvVolume};Label="[Health] CSV Volume"}, - @{Expression={$csvName = VolumeToCSVName($_.Path); $csvStatus = CSVStatus($_.Path); " [$csvStatus] " + $csvName};Label="[Status] CSV Name"}, - @{Expression={CSVToNode(VolumeToCSV($_.Path))};Label="Volume Owner"}, - @{Expression={VolumeToShare($_.Path)};Label="Share Name"}, - @{Expression={$VolResiliency = VolumeToResiliency($_.Path); $volColumns = VolumeToColumns($_.Path); "$VolResiliency,$volColumns" +"Col" };Label="Volume Configuration"}, - @{Expression={"{0:N2}" -f ($_.Size/1GB)};Label="Total Size";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.SizeRemaining/$_.Size*100)};Label="Avail%";Width=11;Align="Right"} - - StartMonitoring - } - - if ($MonitoringMode) { - StartMonitoring - } - - # - # Verify zip location - # - - if (-not (Test-PrefixFilePath ([ref] $ZipPrefix))) { - Write-Error "$ZipPrefix is not a valid prefix for ZIP: $ZipPrefix.ZIP must be creatable" - return - } - - # - # Veriyfing path - # - - if ($ReadFromPath -ne "") { - $Path = $ReadFromPath - $Read = $true - } else { - $Path = $TemporaryPath - $Read = $false - } - - if ($Read) { - $Path = Check-ExtractZip $Path - } else { - Remove-Item -Path $Path -ErrorAction SilentlyContinue -Recurse | Out-Null - md -ErrorAction SilentlyContinue $Path | Out-Null - } - - $PathObject = Get-Item $Path - if ($null -eq $PathObject) { Show-Error ("Path not found: $Path") } - $Path = $PathObject.FullName - - # Note: this should be unnecessary as soon as we have the discipline of Join-Path flushed through - if (-not $Path.EndsWith("\")) { $Path = $Path + "\" } - - ### - # Now handle read case - # - # Generate Summary report based on content. Note this may be an update beyond the version - # at the time of the gather stored in 0_CloudHealthSummary.log. - ### - - if ($Read) { - Show-SddcDiagnosticReport -Report Summary -ReportLevel Full $Path - return - } - - ### - # From here on, this is ONLY the gather/write case (once extraction complete) - ### - - # Start Transcript - $transcriptFile = Join-Path $Path "0_CloudHealthGatherTranscript.log" - try { - Start-Transcript -Path $transcriptFile -Force - } catch { - # show error and rethrow to terminate - Show-Error "Unable to start transcript at $transcriptFile" $_ - throw $_ - } - - try { - - Show-Update "Temporary write path : $Path" - - # - # Handle parameters to archive/pass into the summary report generator. - # XXX note expectedpools with S2D -> 1. Should we force/set? 
- # - - $Parameters = "" | Select-Object TodayDate, ExpectedNodes, ExpectedNetworks, ExpectedVolumes, - ExpectedPhysicalDisks, ExpectedPools, ExpectedEnclosures, ExpectedDedupVolumes, HoursOfEvents, Version - $TodayDate = Get-Date - $Parameters.TodayDate = $TodayDate - $Parameters.ExpectedNodes = $ExpectedNodes - $Parameters.ExpectedNetworks = $ExpectedNetworks - $Parameters.ExpectedVolumes = $ExpectedVolumes - $Parameters.ExpectedDedupVolumes = $ExpectedDedupVolumes - $Parameters.ExpectedPhysicalDisks = $ExpectedPhysicalDisks - $Parameters.ExpectedPools = $ExpectedPools - $Parameters.ExpectedEnclosures = $ExpectedEnclosures - $Parameters.HoursOfEvents = $HoursOfEvents - $Parameters.Version = (Get-Module $Module).Version.ToString() - $Parameters | Export-Clixml ($Path + "GetParameters.XML") - - Show-Update "$Module v $($Parameters.Version)" - - # - # Phase 1 - # - - Show-Update "<<< Phase 1 - Data Gather >>>`n" -ForegroundColor Cyan - - # - # Cluster Nodes - # Note: get unfiltered list for reporting, then filter for continued use during gather - # (i.e., only contact responsive nodes) - # - - try { $ClusterNodes = Get-NodeList -Cluster $ClusterName -Nodes $Nodelist } - catch { Show-Error "Unable to get Cluster Nodes for reporting" $_ } - $ClusterNodes | Export-Clixml ($Path + "GetClusterNode.XML") - - try { $ClusterNodes = Get-NodeList -Cluster $ClusterName -Nodes $Nodelist -Filter } - catch { Show-Error "Unable to get filtered Cluster Nodes for gathering" $_ } - - # use a filtered node as the access node - $AccessNode = Get-ClusterAccessNode @($ClusterNodes) - - # - # Get-Cluster - # - - try { - # discover name if called with default dot form and/or node list - if ($ClusterName -eq ".") { - foreach ($cn in $ClusterNodes) - { - $Cluster = Get-Cluster -Name $cn.Name -ErrorAction SilentlyContinue - if ($Cluster -ne $null) { break } - } - } else { - $Cluster = Get-Cluster -Name $ClusterName - } - } - catch { Show-Error("Cluster could not be contacted. `nError="+$_.Exception.Message) } - - if ($Cluster -ne $null) - { - $Cluster | Export-Clixml ($Path + "GetCluster.XML") - $ClusterName = $Cluster.Name + "." + $Cluster.Domain - $S2DEnabled = $Cluster.S2DEnabled - $ClusterDomain = $Cluster.Domain - - Write-Host "Cluster name : $ClusterName" - } - else - { - # We can only get here if -Nodelist was used, but cluster service isn't running - Show-Warning "Cluster service was not running on any node, some information will be unavailable" - $ClusterName = '' - $ClusterDomain = '' - - Write-Host "Cluster name : Unavailable, Cluster is not online on any node" - } - Write-Host ("Accessible Node List : " + [string]::Join(", ",$ClusterNodes.name)) - Write-Host "Access node : $AccessNode`n" - - # Create node-specific directories for content - - $ClusterNodes.Name |% { - md (Get-NodePath $Path $_) | Out-Null - } - - # - # Verify deduplication prerequisites on access node. - # - - $DedupEnabled = $true - if ($(Invoke-Command -ComputerName $AccessNode -ConfigurationName $SessionConfigurationName {(-not (Get-Command -Module Deduplication))} )) { - $DedupEnabled = $false - } - - #### - # Begin paralellized captures. - # Start accumulating static jobs which self-contain their gather. - # These are pulled in close to the end. Consider how to regularize this down the line. 
- #### - $JobStatic = @() - $JobCopyOut = @() - $JobCopyOutNoDelete = @() - - # capture Sddc Diagnostic Archive if requested and active on the target cluster - if ($Cluster -and - (Get-ClusteredScheduledTask -Cluster $Cluster -TaskName SddcDiagnosticArchive)) { - - if ($DaysOfArchive -gt 0) { - - Show-Update "Start gather of Sddc Diagnostic Archives ..." - - $JobStatic += Start-Job -Name 'Sddc Diagnostic Archive Report' { - - Import-Module $using:Module -ErrorAction SilentlyContinue - - # capture state of the job regardless of archive capture - $o = (Join-Path $using:Path SddcDiagnosticArchiveJob.txt) - Show-SddcDiagnosticArchiveJob -Cluster $using:Cluster > $o - - # use confirm to capture the version validation warnings for replay - note that - # we self-document the version producing the report, so we only need to look for/capture - # warnings to highlight variance - $o = (Join-Path $using:Path SddcDiagnosticArchiveJobWarn.txt) - $null = Confirm-SddcDiagnosticModule -Cluster $using:Cluster 3> $o - } - - $j = Invoke-SddcCommonCommand -ClusterNodes $ClusterNodes.Name -JobName SddcDiagnosticArchive -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - Import-Module $using:Module -ErrorAction SilentlyContinue - - if (Test-SddcModulePresence) { - - $Path = $null - Get-SddcDiagnosticArchiveJobParameters -Path ([ref] $Path) - - # emit - & { - # filter archive? - if ($using:DaysOfArchive -ne -1) { - - # get archive in increasing order of time (our timestamp is lexically sortable) - $Archive = dir $Path\*.ZIP | sort -Descending - if ($Archive.Count -gt $using:DaysOfArchive) { - $Archive = $Archive[0..$($using:DaysOfArchive - 1)] - } - - $Archive.FullName - (dir $Path\*.log).FullName - - } else { - - # get entire archive - # note: we use the wildcard so that we copy the content of the directory - # to the appropriate destination. the path itself is configurable. - # see comment below. - Join-Path (gi $Path).FullName "*" - } - } |% { - - Get-AdminSharePathFromLocal $env:COMPUTERNAME $_ - } - } - } - - # since the archive directory is configurable, we always need to specify the - # destination within the capture - it may be \some\dir\foo, but we want it to be - # node_xxx\SddcDiagnosticArchive in the capture. - # - # we add a member to the jobs to indicate this. also rename them to indicate the - # activity in these jobs, so we report runtime in a more useful way. - $j.ChildJobs |% { - $_ | Add-Member -NotePropertyName Destination -NotePropertyValue SddcDiagnosticArchive - } - - # and add to the copyout-nodelete set - # we do not want to scrub away the archive, unlike content we generate on the target node - # and then do want to delete after capture. - $JobCopyOutNoDelete += $j - } - } - - if ($AccessNode) { - - Show-Update "Start gather of cluster configuration ..." - - $JobStatic += start-job -Name ClusterGroup { - try { - $o = Get-ClusterGroup -Cluster $using:AccessNode - $o | Export-Clixml ($using:Path + "GetClusterGroup.XML") - } - catch { Show-Warning("Unable to get Cluster Groups. `nError="+$_.Exception.Message) } - } - - $JobStatic += start-job -Name ClusterNetwork { - try { - $o = Get-ClusterNetwork -Cluster $using:AccessNode - $o | Export-Clixml ($using:Path + "GetClusterNetwork.XML") - } - catch { Show-Warning("Could not get Cluster Nodes. 
`nError="+$_.Exception.Message) } - } - - $JobStatic += start-job -Name ClusterResource { - try { - $o = Get-ClusterResource -Cluster $using:AccessNode - $o | Export-Clixml ($using:Path + "GetClusterResource.XML") - } - catch { Show-Warning("Unable to get Cluster Resources. `nError="+$_.Exception.Message) } - - } - - $JobStatic += start-job -Name ClusterResourceParameter { - try { - $o = Get-ClusterResource -Cluster $using:AccessNode | Get-ClusterParameter - $o | Export-Clixml ($using:Path + "GetClusterResourceParameters.XML") - } - catch { Show-Warning("Unable to get Cluster Resource Parameters. `nError="+$_.Exception.Message) } - } - - $JobStatic += start-job -Name ClusterSharedVolume { - try { - $o = Get-ClusterSharedVolume -Cluster $using:AccessNode - $o | Export-Clixml ($using:Path + "GetClusterSharedVolume.XML") - } - catch { Show-Warning("Unable to get Cluster Shared Volumes. `nError="+$_.Exception.Message) } - - } - } else { - Show-Update "... Skip gather of cluster configuration since cluster is not available" - } - - if ($IncludeClusterPerformanceHistory) { - - Show-Update "Starting ClusterPerformanceHistory log collection ..." - - $JobStatic += start-job -Name ClusterPerformanceHistory { - try { - Get-Clusterlog -ExportClusterPerformanceHistory -Destination $using:Path -PerformanceHistoryTimeFrame $using:PerformanceHistoryTimeFrame -Node $using:ClusterNodes.Name - } - catch { Show-Warning("Could not get ClusterPerformanceHistory. `nError="+$_.Exception.Message) } - } - } - - Show-Update "Start gather of driver information ..." - - $ClusterNodes.Name |% { - - $node = $_ - - $JobStatic += start-job -Name "Driver Information: $node" { - try { $o = Get-CimInstance -ClassName Win32_PnPSignedDriver -ComputerName $using:node } - catch { Show-Error("Unable to get Drivers on $using:node. `nError="+$_.Exception.Message) } - $o | Export-Clixml (Join-Path (Join-Path $using:Path "Node_$using:node") "GetDrivers.XML") - } - } - - # consider using this as the generic copyout job set - # these are gathers which are not remotable, which we run remote and copy back results for - # keep control of which gathers are fast and therefore for which serialization is not a major issue - # however, dividing these into distinct jobs helps when triaging hangs or sources of error - its a tradeoff - - Show-Update "Start gather of verifier ..." - - $JobCopyOut += Invoke-SddcCommonCommand -ClusterNodes $($ClusterNodes).Name -JobName Verifier -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - # Verifier - - $LocalFile = Join-Path $env:temp "verifier-query.txt" - verifier /query > $LocalFile - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $LocalFile) - - $LocalFile = Join-Path $env:temp "verifier-querysettings.txt" - verifier /querysettings > $LocalFile - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $LocalFile) - } - - Show-Update "Start gather of filesystem filter status ..." 
- - $JobCopyOut += Invoke-SddcCommonCommand -ClusterNodes $($ClusterNodes).Name -JobName 'Filesystem Filter Manager' -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - # Filter Manager - - $LocalFile = Join-Path $env:temp "fltmc.txt" - fltmc > $LocalFile - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $LocalFile) - - $LocalFile = Join-Path $env:temp "fltmc-instances.txt" - fltmc instances > $LocalFile - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $LocalFile) - } - - $JobCopyOutNoDelete += Invoke-SddcCommonCommand -ClusterNodes $($ClusterNodes).Name -JobName 'Copy WER ReportArchive' -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - # ReportArchive copy (one-shot, we'll recursively copy) - - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $env:ProgramData\Microsoft\Windows\WER\ReportArchive) - } - - if ($IncludeDumps -eq $true) { - - $JobCopyOutNoDelete += Invoke-SddcCommonCommand -ClusterNodes $($ClusterNodes).Name -JobName 'Copy ReportQueue' -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - # ReportQueue copy (one-shot, we'll recursively copy) - - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $env:ProgramData\Microsoft\Windows\WER\ReportQueue) - } - } - - if ($IncludeProcessDump) { - - $JobCopyOut += Invoke-SddcCommonCommand -ClusterNodes $($ClusterNodes).Name -JobName ProcessDumps -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc -ArgumentList $ProcessLists { - - Param($ProcessLists) - - $NodePath = $env:Temp - $Node = $env:COMPUTERNAME - - #Default dump processes. - $DumpProcesses = @("vmms", "vmcompute", "vmwp", "rhs", "clussvc") - - #Appending user passed process lists. - if ($ProcessLists -ne $null) { - $DumpProcesses += $ProcessLists.split(",") - } - - - $DumpFileFolder = Join-Path -Path $NodePath -ChildPath 'ProcessDumps' - - if (Test-Path -Path $DumpFileFolder) { - Remove-Item -Path $DumpFileFolder -Recurse -Force - } - - $null = New-Item -Path $DumpFileFolder -ItemType Directory - $WER = [PSObject].Assembly.GetType('System.Management.Automation.WindowsErrorReporting') - $NativeMethods = $WER.GetNestedType('NativeMethods', 'NonPublic') - $MiniDump = $NativeMethods.GetMethod('MiniDumpWriteDump', ([Reflection.BindingFlags]'NonPublic, Static')) - $MiniDumpWithFullMemory = [UInt32] 2 - $ProcessList = @{} - - foreach ($ProcessName in $DumpProcesses) { - - $ProcessIds = Get-Process -Name $ProcessName -ErrorAction SilentlyContinue | Select-Object -ExpandProperty Id - - if (-not $ProcessIds) { - Show-Warning "Could not generate minidump for process $ProcessName" - continue; - } - - foreach ($ProcessId in $ProcessIds) { - - #Already collected. - if ($ProcessList[$ProcessId]) { - continue; - } - - $ProcessList.Add($ProcessId, $ProcessName) - - $Process = Get-Process -Id $ProcessId - $ProcessId = $Process.Id - $ProcessHandle = $Process.Handle - $DumpFileName = "$($ProcessName)_$($ProcessId).dmp" - - $DumpFilePath = Join-Path $DumpFileFolder $DumpFileName - - $DumpFile = New-Object IO.FileStream($DumpFilePath, [IO.FileMode]::Create) - - $Result = $MiniDump.Invoke($null, @($ProcessHandle, #hProcess - $ProcessId, #ProcessId - $DumpFile.SafeFileHandle, #hFile - $MiniDumpWithFullMemory #DumpType - [IntPtr]::Zero, #ExceptionParam - [IntPtr]::Zero, #UserStreamParam - [IntPtr]::Zero)) #CallbackParam - - $DumpFile.Close() - - if(-not $Result) { - Show-Warning "Failed to write dump file for process $psname with PID $ProcessId." 
- Remove-Item $DumpFilePath - } else { - Write-Output (Get-AdminSharePathFromLocal $Node $DumpFilePath) - } - } - } - } - } - - if ($IncludeGetNetView) { - - Show-Update "Start gather of Get-NetView ..." - - $JobCopyOut += Invoke-SddcCommonCommand -ArgumentList $SkipVm -ClusterNodes $($ClusterNodes).Name -JobName 'GetNetView' -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc { - - Param($SkipVM) - - $NodePath = $env:Temp - - # create a directory to capture GNV - - $gnvDir = Join-Path $NodePath 'GetNetView' - Remove-Item -Recurse -Force $gnvDir -ErrorAction SilentlyContinue - $null = md $gnvDir -Force -ErrorAction SilentlyContinue - - # run inside a child session so we can sink output to the transcript - # we must pass the GNV dir since $using is statically evaluated in the - # outermost scope and $gnvDir is inside the Invoke call. - - $j = Start-Job -ArgumentList $gnvDir,$SkipVM { - - param($gnvDir,$SkipVM) - - # start gather transcript to the GNV directory - - $transcriptFile = Join-Path $gnvDir "0_GetNetViewGatherTranscript.log" - Start-Transcript -Path $transcriptFile -Force - - if (Get-Command Get-NetView -ErrorAction SilentlyContinue) { - if ($SkipVM) { - Get-NetView -OutputDirectory $gnvDir -SkipLogs -SkipVM - } else { - Get-NetView -OutputDirectory $gnvDir -SkipLogs - } - } else { - Write-Host "Get-NetView command not available" - } - - Stop-Transcript - } - - # do not receive job - sunk to transcript for offline analysis - # gnv produces a very large quantity of host output - $null = $j | Wait-Job - $j | Remove-Job - - # wipe all non-file content (gnv produces zip + uncompressed dir, don't need the dir) - dir $gnvDir -Directory |% { - Remove-Item -Recurse -Force $_.FullName - } - - # gather all remaining content (will be the zip + transcript) in GNV directory - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $gnvDir) - } - } - - # Events, cmd, reports, et.al. - Show-Update "Start gather of system info, cluster/health logs, reports and dump files ..." 
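-    # Note the pattern used by the Get-NetView gather above: a very chatty command is run in a
-    # child job that transcribes its own host output, and the parent only waits for and removes
-    # the job (no Receive-Job), so the verbose output lands in the transcript file rather than
-    # this session. A minimal sketch of that shape, as a comment; the directory and the command
-    # being wrapped are illustrative only:
-    #
-    #   $j = Start-Job -ArgumentList $outDir {
-    #       param($outDir)
-    #       Start-Transcript -Path (Join-Path $outDir "0_GatherTranscript.log") -Force
-    #       Get-NetAdapter | Format-List * | Out-Host    # chatty output is captured by the transcript
-    #       Stop-Transcript
-    #   }
-    #   $null = $j | Wait-Job
-    #   $j | Remove-Job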
- - $JobStatic += Start-Job -Name ClusterLogs { - $null = Get-ClusterLog -Node $using:ClusterNodes.Name -Destination $using:Path -UseLocalTime - } - - if ($S2DEnabled) { - $JobStatic += Start-Job -Name ClusterHealthLogs { - $null = Get-ClusterLog -Node $using:ClusterNodes.Name -Destination $using:Path -Health -UseLocalTime - } - } - - $JobStatic += $ClusterNodes.Name |% { - - $NodeName = $_ - - Invoke-SddcCommonCommand -JobName "System Info: $NodeName" -InitBlock $CommonFunc -SessionConfigurationName $SessionConfigurationName -ScriptBlock { - - $Node = "$using:NodeName" - if ($using:ClusterDomain.Length) { - $Node += ".$using:ClusterDomain" - } - - $LocalNodeDir = Get-NodePath $using:Path $using:NodeName - - # Text-only conventional commands - # - # Gather SYSTEMINFO.EXE output for a given node - SystemInfo.exe /S $using:NodeName > (Join-Path (Get-NodePath $using:Path $using:NodeName) "SystemInfo.TXT") - - # Cmdlets to drop in TXT and XML forms - # - # cmd is of the form "cmd arbitraryConstantArgs -argForComputerOrSessionSpecification" - # will be trimmed to "cmd" for logging - # _A_ token will be replaced with the chosen cluster access node - # _C_ token will be replaced with node fqdn for cimsession/computername callouts - # _N_ token will be replaced with node non-fqdn - $CmdsToLog = 'Get-HotFix -ComputerName _C_', - 'Get-NetAdapter -CimSession _C_', - 'Get-NetAdapterAdvancedProperty -CimSession _C_', - 'Get-NetAdapterBinding -CimSession _C_', - 'Get-NetAdapterChecksumOffload -CimSession _C_', - 'Get-NetAdapterIPsecOffload -CimSession _C_', - 'Get-NetAdapterLso -CimSession _C_', - 'Get-NetAdapterPacketDirect -CimSession _C_', - 'Get-NetAdapterRdma -CimSession _C_', - 'Get-NetAdapterRsc -CimSession _C_', - 'Get-NetAdapterRss -CimSession _C_', - 'Get-NetAdapterVmq -CimSession _C_', - 'Get-NetIPv4Protocol -CimSession _C_', - 'Get-NetIPv6Protocol -CimSession _C_', - 'Get-NetIpAddress -CimSession _C_', - 'Get-NetLbfoTeam -CimSession _C_', - 'Get-NetLbfoTeamMember -CimSession _C_', - 'Get-NetLbfoTeamNic -CimSession _C_', - 'Get-NetOffloadGlobalSetting -CimSession _C_', - 'Get-NetPrefixPolicy -CimSession _C_', - 'Get-NetQosPolicy -CimSession _C_', - 'Get-NetRoute -CimSession _C_', - 'Get-Disk -CimSession _C_', - 'Get-NetTcpConnection -CimSession _C_', - 'Get-NetTcpSetting -CimSession _C_', - 'Get-ScheduledTask -CimSession _C_ | Get-ScheduledTaskInfo -CimSession _C_', - 'Get-SmbServerNetworkInterface -CimSession _C_', - 'Get-StorageFaultDomain -CimSession _A_ -Type StorageScaleUnit |? 
FriendlyName -eq _N_ | Get-StorageFaultDomain -CimSession _A_' - - # These commands are specific to optional modules, add only if present - # - DcbQos: RoCE environments primarily - # - Hyper-V: may be ommitted in SOFS-only cases - if (Get-Module DcbQos -ErrorAction SilentlyContinue) { - $CmdsToLog += 'Get-NetQosDcbxSetting -CimSession _C_', - 'Get-NetQosFlowControl -CimSession _C_', - 'Get-NetQosTrafficClass -CimSession _C_' - } - - if (Get-Module Hyper-V -ErrorAction SilentlyContinue) { - $CmdsToLog += 'Get-VM -CimSession _C_ -ErrorAction SilentlyContinue', - 'Get-VMNetworkAdapter -All -CimSession _C_ -ErrorAction SilentlyContinue', - 'Get-VMSwitch -CimSession _C_ -ErrorAction SilentlyContinue' - } - - foreach ($cmd in $CmdsToLog) { - - # truncate cmd string to the cmd itself - $LocalFile = (Join-Path $LocalNodeDir (($cmd.split(' '))[0] -replace "-","")) - try { - - $cmdex = $cmd -replace '_C_',$using:NodeName -replace '_N_',$using:NodeName -replace '_A_',$using:AccessNode - $out = iex $cmdex - - # capture as txt and xml for quick analysis according to taste - $out | ft -AutoSize | Out-File -Width 9999 -Encoding ascii -FilePath "$LocalFile.txt" - $out | Export-Clixml -Path "$LocalFile.xml" - - } catch { - Show-Warning "'$cmdex' failed for node $Node ($($_.Exception.Message))" - } - } - - $NodeSystemRootPath = Invoke-Command -ComputerName $using:NodeName -ConfigurationName $using:SessionConfigurationName { $env:SystemRoot } - - # Avoid to use 'Join-Path' because the drive of path may not exist on the local machine. - if ($using:IncludeDumps -eq $true) { - - $NodeMinidumpsPath = Invoke-Command -ComputerName $using:NodeName -ConfigurationName $using:SessionConfigurationName { (Get-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\CrashControl').MinidumpDir } -ErrorAction SilentlyContinue - $NodeLiveKernelReportsPath = Invoke-Command -ComputerName $using:NodeName -ConfigurationName $using:SessionConfigurationName { (Get-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\CrashControl\LiveKernelReports').LiveKernelReportsPath } -ErrorAction SilentlyContinue - ## - # Minidumps - ## - - try { - # Use the registry key value if it exists. - if ($NodeMinidumpsPath) { - $RPath = (Get-AdminSharePathFromLocal $using:NodeName "$NodeMinidumpsPath\*.dmp") - } - else { - $RPath = (Get-AdminSharePathFromLocal $using:NodeName "$NodeSystemRootPath\Minidump\*.dmp") - } - - $DmpFiles = Get-ChildItem -Path $RPath -Recurse -ErrorAction SilentlyContinue - } - catch { $DmpFiles = ""; Show-Warning "Unable to get minidump files for node $using:NodeName" } - - $DmpFiles |% { - try { Copy-Item $_.FullName $LocalNodeDir } - catch { Show-Warning("Could not copy minidump file $_.FullName") } - } - - ## - # Live Kernel Reports - ## - - try { - # Use the registry key value if it exists. 
- if ($NodeLiveKernelReportsPath) { - $RPath = (Get-AdminSharePathFromLocal $using:NodeName "$NodeLiveKernelReportsPath\*.dmp") - } - else { - $RPath = (Get-AdminSharePathFromLocal $using:NodeName "$NodeSystemRootPath\LiveKernelReports\*.dmp") - } - - $DmpFiles = Get-ChildItem -Path $RPath -Recurse -ErrorAction SilentlyContinue - } - catch { $DmpFiles = ""; Show-Warning "Unable to get LiveKernelReports files for node $using:NodeName" } - - $DmpFiles |% { - try { Copy-Item $_.FullName $LocalNodeDir } - catch { Show-Warning "Could not copy LiveKernelReports file $($_.FullName)" } - } - } - - try { - $RPath = (Get-AdminSharePathFromLocal $using:NodeName "$NodeSystemRootPath\Cluster\Reports\*.*") - $RepFiles = Get-ChildItem -Path $RPath -Recurse -ErrorAction SilentlyContinue } - catch { $RepFiles = ""; Show-Warning "Unable to get reports for node $using:NodeName" } - - $LocalReportDir = Join-Path $LocalNodeDir "ClusterReports" - md $LocalReportDir | Out-Null - - # Copy logs from the Report directory; exclude cluster/health logs which we're getting seperately - $RepFiles |% { - if (($_.Name -notlike "Cluster.log") -and ($_.Name -notlike "ClusterHealth.log")) { - try { Copy-Item $_.FullName $LocalReportDir } - catch { Show-Warning "Could not copy report file $($_.FullName)" } - } - } - } - } - - Show-Update "Starting export diagnostic log and live dump ..." - - $JobCopyOut += Invoke-SddcCommonCommand -ArgumentList $IncludeLiveDump,$IncludeStorDiag -ClusterNodes $AccessNode -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc -JobName StorageDiagnosticInfoAndLiveDump { - - Param($IncludeLiveDump,$IncludeStorDiag) - - $Node = $env:COMPUTERNAME - $NodePath = $env:Temp - - $destinationPath = Join-Path -Path $NodePath -ChildPath 'StorageDiagnosticDump' - - if (Test-Path -Path $destinationPath) { - Remove-Item -Path $destinationPath -Recurse -Force - } - - $clusterSubsystem = (Get-StorageSubSystem |? Model -eq 'Clustered Windows Storage').FriendlyName - - if ($IncludeLiveDump) { - Get-StorageDiagnosticInfo -StorageSubSystemFriendlyName $clusterSubsystem -IncludeLiveDump -DestinationPath $destinationPath - - # Copy storage diagnostic and live dump information (one-shot, we'll recursively copy) - Write-Output (Get-AdminSharePathFromLocal $Node $destinationPath) - } - elseif ($IncludeStorDiag) { - Get-StorageDiagnosticInfo -StorageSubSystemFriendlyName $clusterSubsystem -DestinationPath $destinationPath - - # Copy storage diagnostic and live dump information (one-shot, we'll recursively copy) - Write-Output (Get-AdminSharePathFromLocal $Node $destinationPath) - } - } - - Show-Update "Starting export of events ..." - - $JobCopyOut += Invoke-SddcCommonCommand -ArgumentList $HoursOfEvents -ClusterNodes $($ClusterNodes).Name -SessionConfigurationName $SessionConfigurationName -InitBlock $CommonFunc -JobName Events { - - Param([int] $Hours) - - $Node = $env:COMPUTERNAME - $NodePath = $env:Temp - - Get-SddcCapturedEvents $NodePath $Hours |% { - - Write-Output (Get-AdminSharePathFromLocal $Node $_) - } - - # Also export locale metadata for off-system rendering (one-shot, we'll recursively copy) - Write-Output (Get-AdminSharePathFromLocal $Node (Join-Path $NodePath "LocaleMetaData")) - } - - if ($IncludeAssociations -and $ClusterName.Length) { - - # This is used at Phase 2 and is run asynchronously since - # it can take some time to gather for large numbers of devices. 
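-        # The association/SNV gathers are started as background jobs here and only received at
-        # Phase 2, so their slow CIM enumerations overlap the rest of the gather. A minimal sketch
-        # of the start-now/receive-later shape used by $SNVJob and $AssocJob below; the scriptblock
-        # body is illustrative only:
-        #
-        #   $AssocJob = Start-Job -Name 'StorageComponentAssociations' -ArgumentList $AccessNode {
-        #       param($AccessNode)
-        #       Get-VirtualDisk -CimSession $AccessNode    # slow enumeration runs in the background
-        #   }
-        #   # ... other gathers run in the meantime ...
-        #   $Associations = $AssocJob | Wait-Job | Receive-Job
-        #   $AssocJob | Remove-Job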
- - # Gather nodes view of storage and build all the associations - - $SNVJob = Start-Job -Name 'StorageNodePhysicalDiskView' -ArgumentList $ClusterName { - param ($ClusterName) - $clusterCimSession = New-CimSession -ComputerName $ClusterName - $snvInstances = Get-CimInstance -Namespace root\Microsoft\Windows\Storage -ClassName MSFT_StorageNodeToPhysicalDisk -CimSession $clusterCimSession - $allPhysicalDisks = Get-PhysicalDisk -CimSession $clusterCimSession - $SNV = @() - - Foreach ($phyDisk in $snvInstances) { - $SNVObject = New-Object -TypeName System.Object - $pdIndex = $phyDisk.PhysicalDiskObjectId.IndexOf("PD:") - $pdLength = $phyDisk.PhysicalDiskObjectId.Length - $pdID = $phyDisk.PhysicalDiskObjectId.Substring($pdIndex+3, $pdLength-($pdIndex+4)) - $PDUID = ($allPhysicalDisks |? ObjectID -Match $pdID).UniqueID - $pd = $allPhysicalDisks |? UniqueID -eq $PDUID - $nodeIndex = $phyDisk.StorageNodeObjectId.IndexOf("SN:") - $nodeLength = $phyDisk.StorageNodeObjectId.Length - $storageNodeName = $phyDisk.StorageNodeObjectId.Substring($nodeIndex+3, $nodeLength-($nodeIndex+4)) - $poolName = ($pd | Get-StoragePool -CimSession $clusterCimSession -ErrorAction SilentlyContinue |? IsPrimordial -eq $false).FriendlyName - if (-not $poolName) { - continue - } - - $SNVObject | Add-Member -Type NoteProperty -Name PhysicalDiskUID -Value $PDUID - $SNVObject | Add-Member -Type NoteProperty -Name StorageNode -Value $storageNodeName - $SNVObject | Add-Member -Type NoteProperty -Name StoragePool -Value $poolName - $SNVObject | Add-Member -Type NoteProperty -Name MPIOPolicy -Value $phyDisk.LoadBalancePolicy - $SNVObject | Add-Member -Type NoteProperty -Name MPIOState -Value $phyDisk.IsMPIOEnabled - $SNVObject | Add-Member -Type NoteProperty -Name StorageEnclosure -Value $pd.PhysicalLocation - $SNVObject | Add-Member -Type NoteProperty -Name PathID -Value $phyDisk.PathID - $SNVObject | Add-Member -Type NoteProperty -Name PathState -Value $phyDisk.PathState - - $SNV += $SNVObject - } - Write-Output $SNV - } - - # Gather association between pool, virtualdisk, volume, share. - - $AssocJob = Start-Job -Name 'StorageComponentAssociations' -ArgumentList $AccessNode,$ClusterName { - param($AccessNode,$ClusterName) - - $SmbShares = Get-SmbShare -CimSession $AccessNode - $Associations = Get-VirtualDisk -CimSession $AccessNode |% { - - $o = $_ | Select-Object FriendlyName, OperationalStatus, HealthStatus, CSVName, CSVStatus, CSVNode, CSVPath, CSVVolume, - ShareName, SharePath, VolumeID, PoolName, PoolOpStatus, PoolHealthStatus, VDResiliency, VDCopies, VDColumns, VDEAware - - $AssocCSV = $_ | Get-ClusterSharedVolume -Cluster $ClusterName - - if ($AssocCSV) { - $o.CSVName = $AssocCSV.Name - $o.CSVStatus = $AssocCSV.State - $o.CSVNode = $AssocCSV.OwnerNode.Name - $o.CSVPath = $AssocCSV.SharedVolumeInfo.FriendlyVolumeName - if ($o.CSVPath.Length -ne 0) { - $o.CSVVolume = $o.CSVPath.Split("\")[2] - } - $AssocLike = $o.CSVPath+"\*" - $AssocShares = $SmbShares |? 
Path -like $AssocLike
-                $AssocShare = $AssocShares | Select-Object -First 1
-                if ($AssocShare) {
-                    $o.ShareName = $AssocShare.Name
-                    $o.SharePath = $AssocShare.Path
-                    $o.VolumeID = $AssocShare.Volume
-                    if ($AssocShares.Count -gt 1) { $o.ShareName += "*" }
-                }
-            }
-
-            Write-Output $o
-        }
-
-        $AssocPool = Get-StoragePool -CimSession $AccessNode -ErrorAction SilentlyContinue
-        $AssocPool |% {
-            $AssocPName = $_.FriendlyName
-            $AssocPOpStatus = $_.OperationalStatus
-            $AssocPHStatus = $_.HealthStatus
-            Get-StoragePool -CimSession $AccessNode -FriendlyName $AssocPName |
-                Get-VirtualDisk -CimSession $AccessNode |% {
-                $AssocVD = $_
-                $Associations |% {
-                    if ($_.FriendlyName -eq $AssocVD.FriendlyName) {
-                        $_.PoolName = $AssocPName
-                        $_.PoolOpStatus = $AssocPOpStatus
-                        $_.PoolHealthStatus = $AssocPHStatus
-                        $_.VDResiliency = $AssocVD.ResiliencySettingName
-                        $_.VDCopies = $AssocVD.NumberofDataCopies
-                        $_.VDColumns = $AssocVD.NumberofColumns
-                        $_.VDEAware = $AssocVD.IsEnclosureAware
-                    }
-                }
-            }
-        }
-
-        Write-Output $Associations
-    }
-}
-
-#
-# SMB share health/status
-#
-
-Show-Update "SMB Shares"
-
-try { $SmbShares = Get-SmbShare -CimSession $AccessNode }
-catch { Show-Error("Unable to get SMB Shares. `nError="+$_.Exception.Message) }
-
-# Note: SharePath and Health are not properties of the SmbShare objects; Select-Object creates them
-# empty here and the accessibility test below fills them in.
-$ShareStatus = $SmbShares |? ContinuouslyAvailable | Select-Object ScopeName, Name, SharePath, Health
-$Count1 = 0
-$Total1 = NCount($ShareStatus)
-
-if ($Total1 -gt 0)
-{
-    $ShareStatus |% {
-        $Progress = $Count1 / $Total1 * 100
-        $Count1++
-        Write-Progress -Activity "Testing file share access" -PercentComplete $Progress
-
-        if ($ClusterDomain -ne "")
-        {
-            $_.SharePath = "\\" + $_.ScopeName + "." + $ClusterDomain + "\" + $_.Name
-        }
-        else
-        {
-            $_.SharePath = "\\" + $_.ScopeName + "\" + $_.Name
-        }
-        try { if (Test-Path -Path $_.SharePath -ErrorAction SilentlyContinue) {
-                $_.Health = "Accessible"
-            } else {
-                $_.Health = "Inaccessible"
-            }
-        }
-        catch { $_.Health = "Inaccessible: "+$_.Exception.Message }
-    }
-    Write-Progress -Activity "Testing file share access" -Completed
-}
-
-$ShareStatus | Export-Clixml ($Path + "ShareStatus.XML")
-
-Show-Update "SMB Share Open Files"
-
-try {
-    $o = Get-SmbOpenFile -CimSession $AccessNode
-    $o | Export-Clixml ($Path + "GetSmbOpenFile.XML") }
-catch { Show-Error("Unable to get Open Files. `nError="+$_.Exception.Message) }
-
-Show-Update "SMB Share Witness"
-
-try {
-    $o = Get-SmbWitnessClient -CimSession $AccessNode
-    $o | Export-Clixml ($Path + "GetSmbWitness.XML") }
-catch { Show-Error("Unable to get SMB Witness Clients. 
`nError="+$_.Exception.Message) } - - Show-Update "Clustered Subsystem" - - # NOTE: $Subsystem is reused several times below - try { - $Subsystem = Get-StorageSubsystem Cluster* -CimSession $AccessNode - $Subsystem | Export-Clixml ($Path + "GetStorageSubsystem.XML") - } - catch { Show-Warning("Unable to get Clustered Subsystem.`nError="+$_.Exception.Message) } - - # Automatic triage is dependent on the cluster (Health Resource), avoid spurious - # errors if not available - if ($Subsystem.HealthStatus -notlike "Healthy" -and $ClusterName.Length) { - Show-Update "Triage for Clustered Subsystem (HealthStatus = $($Subsystem.HealthStatus))" - try { - $cmdlet = Get-Command Get-HealthFault -ErrorAction SilentlyContinue - if ($null -ne $cmdlet -and $cmdlet.Source -eq 'FailoverClusters') { - Get-HealthFault -CimSession $AccessNode | - Export-Clixml (Join-Path $Path "HeathFault.XML") - } else { - $Subsystem | Debug-StorageSubsystem -CimSession $AccessNode | - Export-Clixml (Join-Path $Path "DebugStorageSubsystem.XML") - } - } - catch { Show-Error "Unable to get Get-HealthFault or Debug-StorageSubsystem for unhealthy StorageSubsystem.`nError=" $_ } - } - - Show-Update "Volumes & Virtual Disks" - - # Volume status - - try { - $Volumes = Get-Volume -CimSession $AccessNode -StorageSubSystem $Subsystem - $Volumes | Export-Clixml ($Path + "GetVolume.XML") } - catch { Show-Error("Unable to get Volumes. `nError="+$_.Exception.Message) } - - - # Virtual disk health - # Used in S2D-specific gather below - - try { - $VirtualDisk = Get-VirtualDisk -CimSession $AccessNode -StorageSubSystem $Subsystem - $VirtualDisk | Export-Clixml ($Path + "GetVirtualDisk.XML") - } - catch { Show-Warning("Unable to get Virtual Disks.`nError="+$_.Exception.Message) } - - # Deduplicated volume health - # XXX the counts/healthy likely not needed once phase 2 shifted into summary report - - if ($DedupEnabled) - { - Show-Update "Dedup Volume Status" - - try { - $DedupVolumes = Invoke-Command -ComputerName $AccessNode -ConfigurationName $SessionConfigurationName { Get-DedupStatus } - $DedupVolumes | Export-Clixml ($Path + "GetDedupVolume.XML") } - catch { Show-Error("Unable to get Dedup Volumes.`nError="+$_.Exception.Message) } - - $DedupTotal = NCount($DedupVolumes) - $DedupHealthy = NCount($DedupVolumes |? LastOptimizationResult -eq 0 ) - - } else { - - $DedupVolumes = @() - $DedupTotal = 0 - $DedupHealthy = 0 - } - - Show-Update "Storage Pool & Tiers" - - # Storage tier information - - try { - Get-StorageTier -CimSession $AccessNode | - Export-Clixml ($Path + "GetStorageTier.XML") } - catch { Show-Warning("Unable to get Storage Tiers. `nError="+$_.Exception.Message) } - - # Storage pool health - - try { - $StoragePools = @(Get-StoragePool -IsPrimordial $False -CimSession $AccessNode -StorageSubSystem $Subsystem -ErrorAction SilentlyContinue) - $StoragePools | Export-Clixml ($Path + "GetStoragePool.XML") } - catch { Show-Error("Unable to get Storage Pools. `nError="+$_.Exception.Message) } - - Show-Update "Storage Jobs" - - try { - # cannot subsystem scope Get-StorageJob at this time - icm $AccessNode -ConfigurationName $SessionConfigurationName { Get-StorageJob } | - Export-Clixml ($Path + "GetStorageJob.XML") } - catch { Show-Warning("Unable to get Storage Jobs. 
`nError="+$_.Exception.Message) } - - Show-Update "Clustered PhysicalDisks and SNV" - - # Physical disk health - - try { - $PhysicalDisks = Get-PhysicalDisk -CimSession $AccessNode -StorageSubSystem $Subsystem - $PhysicalDisks | Export-Clixml ($Path + "GetPhysicalDisk.XML") } - catch { Show-Error("Unable to get Physical Disks. `nError="+$_.Exception.Message) } - - try { - $PhysicalDiskSNV = Get-PhysicalDisk -CimSession $AccessNode -StorageSubSystem $Subsystem | Get-PhysicalDiskSNV -CimSession $AccessNode | - Export-Clixml ($Path + "GetPhysicalDiskSNV.XML") } - catch { Show-Error("Unable to get Physical Disk Storage Node View. `nError="+$_.Exception.Message) } - - # Reliability counters - # These may cause a latency burst on some devices due to device-specific requirements for lifting/generating - # the SMART data which underlies them. Decline to do this by default. - - if ($IncludeReliabilityCounters -eq $true) { - - Show-Update "Storage Reliability Counters" - - try { - $PhysicalDisks | Get-StorageReliabilityCounter -CimSession $AccessNode | - Export-Clixml ($Path + "GetReliabilityCounter.XML") } - catch { Show-Error("Unable to get Storage Reliability Counters. `nError="+$_.Exception.Message) } - - } - - # Storage enclosure health - - Show-Update "Storage Enclosures" - - try { - Get-StorageEnclosure -CimSession $AccessNode -StorageSubSystem $Subsystem | - Export-Clixml ($Path + "GetStorageEnclosure.XML") } - catch { Show-Error("Unable to get Enclosures. `nError="+$_.Exception.Message) } - - # Undo changes as this is failing in AzureStack environment. - # SDDC cim objects - - #Show-Update "SDDC Cim Objects" - - #foreach($objType in @("Drive","Server","Volume","Cluster","VirtualMachine","VirtualSwitch")) - #{ - # try { - # $className = "SDDC_"+$objType; - # Get-CimInstance -Namespace "root\SDDC\Management" -ClassName $className | Export-Clixml ($Path + "GetSddc"+$objType+".XML"); - # } - # catch { Show-Warning("Unable to get SDDC "+$objType+". `nError="+$_.Exception.Message) } - #} - - # - # Generate SBL Connectivity report based on input clusport information - # - - if ($S2DEnabled) { - - Show-Update "Pooled Disks" - - try { - if ($StoragePools.Count -eq 1) { - $StoragePools | Get-PhysicalDisk -CimSession $AccessNode | - Export-Clixml (Join-Path $Path ("GetPhysicalDisk_Pool.xml")) - } - } catch { - Show-Error "Not able to query pooled disks" $_ - } - - Show-Update "Storage Scale Units" - - try { - $Subsystem | Get-StorageFaultDomain -CimSession $AccessNode -Type StorageScaleUnit | - Export-Clixml (Join-Path $Path ("GetStorageFaultDomain_SSU.xml")) - } catch { - Show-Error "Not able to query Storage Scale Units" $_ - } - - Show-Update "S2D Connectivity" - - try { - $JobStatic += $ClusterNodes |% { - $node = $_.Name - start-job -Name "S2D Connectivity: $node" { - Get-CimInstance -Namespace root\wmi -ClassName ClusPortDeviceInformation -ComputerName $using:node | - Export-Clixml (Join-Path (Join-Path $using:Path "Node_$using:node") "ClusPort.xml") - Get-CimInstance -Namespace root\wmi -ClassName ClusBfltDeviceInformation -ComputerName $using:node | - Export-Clixml (Join-Path (Join-Path $using:Path "Node_$using:node") "ClusBflt.xml") - } - } - } catch { - Show-Warning "Gathering S2D connectivity failed" - } - } - - #### - # Now receive the jobs requiring remote copyout - #### - - if ($JobCopyOut.Count -or $JobCopyOutNoDelete.Count) { - - Show-Update "Completing jobs with remote copyout ..." 
-ForegroundColor Green - Show-WaitChildJob ($JobCopyOut + $JobCopyOutNoDelete) 120 - Show-Update "Starting remote copyout ..." - - # keep parallelizing on receive at the individual node/child job level - $JobCopy = @() - if ($JobCopyOut.Count) { $JobCopy += Start-CopyJob $Path -Delete $JobCopyOut } - if ($JobCopyOutNoDelete.Count) { $JobCopy += Start-CopyJob $Path $JobCopyOutNoDelete } - Show-WaitChildJob $JobCopy 30 - - # receive any copyout errors for logging/triage - Receive-Job $JobCopy - Remove-Job ($JobCopyOut + $JobCopyOutNoDelete) - Remove-Job $JobCopy - - if (Get-Member -InputObject $JobCopyOut ActiveSessions) - { - Remove-PSSession -Id $JobCopyOut.ActiveSessions - } - } - - Show-Update "All remote copyout complete" -ForegroundColor Green - - #### - # Now receive the static jobs - #### - - Show-Update "Completing background gathers ..." -ForegroundColor Green - Show-WaitChildJob $JobStatic 30 - Receive-Job $JobStatic - Remove-Job $JobStatic - - if (Get-Member -InputObject $JobStatic ActiveSessions) - { - Remove-PSSession -Id $JobStatic.ActiveSessions - } - - # wipe variables to catch reuse - Remove-Variable JobCopyOut - Remove-Variable JobStatic - - # - # Phase 2 Prep - # - Show-Update "<<< Phase 2 - Pool, Physical Disk and Volume Details >>>" -ForegroundColor Cyan - - if ($IncludeAssociations) { - - if ($Read) { - $Associations = Import-ClixmlIf ($Path + "GetAssociations.XML") - $SNVView = Import-ClixmlIf ($Path + "GetStorageNodeView.XML") - } else { - "`nCollecting device associations..." - try { - $Associations = $AssocJob | Wait-Job | Receive-Job - $AssocJob | Remove-Job - if ($null -eq $Associations) { - Show-Warning "Unable to get object associations" - } - $Associations | Export-Clixml ($Path + "GetAssociations.XML") - - "`nCollecting storage view associations..." - $SNVView = $SNVJob | Wait-Job | Receive-Job - $SNVJob | Remove-Job - if ($null -eq $SNVView) { - Show-Warning "Unable to get nodes storage view associations" - } - $SNVView | Export-Clixml ($Path + "GetStorageNodeView.XML") - } catch { - Show-Warning "Not able to query associations.." - } - } - } - - # - # Phase 2 - # - - if ($IncludeHealthReport) { - "`n[Health Report]" - "`nVolumes with status, total size and available size, sorted by Available Size" - "Notes: Sizes shown in gigabytes (GB). * means multiple shares on that volume" - - $Volumes |? 
FileSystem -eq CSVFS | Sort-Object SizeRemaining | - Format-Table -AutoSize @{Expression={$poolName = VolumeToPool($_.Path); "[$(PoolOperationalStatus($_.Path))/$(PoolHealthStatus($_.Path))] " + $poolName};Label="[OpStatus/Health] Pool"}, - @{Expression={(PoolHealthyPDs(VolumeToPool($_.Path)))};Label="HealthyPhysicalDisks"; Align="Center"}, - @{Expression={$vd = VolumeToVD($_.Path); "[$(VDOperationalStatus($_.Path))/$(VDHealthStatus($_.Path))] "+$vd};Label="[OpStatus/Health] VirtualDisk"}, - @{Expression={$csvVolume = VolumeToCSV($_.Path); "[" + $_.HealthStatus + "] " + $csvVolume};Label="[Health] CSV Volume"}, - @{Expression={$csvName = VolumeToCSVName($_.Path); $csvStatus = CSVStatus($_.Path); " [$csvStatus] " + $csvName};Label="[Status] CSV Name"}, - @{Expression={CSVToNode(VolumeToCSV($_.Path))};Label="Volume Owner"}, - @{Expression={VolumeToShare($_.Path)};Label="Share Name"}, - @{Expression={$VolResiliency = VolumeToResiliency($_.Path); $volColumns = VolumeToColumns($_.Path); "$VolResiliency,$volColumns" +"Col" };Label="Volume Configuration"}, - @{Expression={"{0:N2}" -f ($_.Size/1GB)};Label="Total Size";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.SizeRemaining/$_.Size*100)};Label="Avail%";Width=11;Align="Right"} - - if ($DedupEnabled -and ($DedupTotal -gt 0)) - { - "Dedup Volumes with status, total size and available size, sorted by Savings %" - "Notes: Sizes shown in gigabytes (GB). * means multiple shares on that volume" - - $DedupVolumes | Sort-Object SavingsRate -Descending | - Format-Table -AutoSize @{Expression={$poolName = VolumeToPool($_.VolumeId); "[$(PoolOperationalStatus($_.VolumeId))/$(PoolHealthStatus($_.VolumeId))] " + $poolName};Label="[OpStatus/Health] Pool"}, - @{Expression={(PoolHealthyPDs(VolumeToPool($_.VolumeId)))};Label="HealthyPhysicalDisks"; Align="Center"}, - @{Expression={$vd = VolumeToVD($_.VolumeId); "[$(VDOperationalStatus($_.VolumeId))/$(VDHealthStatus($_.VolumeId))] "+$vd};Label="[OpStatus/Health] VirtualDisk"}, - @{Expression={VolumeToCSV($_.VolumeId)};Label="Volume "}, - @{Expression={VolumeToShare($_.VolumeId)};Label="Share"}, - @{Expression={"{0:N2}" -f ($_.Capacity/1GB)};Label="Capacity";Width=11;Align="Left"}, - @{Expression={"{0:N2}" -f ($_.UnoptimizedSize/1GB)};Label="Before";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.UsedSpace/1GB)};Label="After";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.SavingsRate)};Label="Savings%";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.FreeSpace/1GB)};Label="Free";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.FreeSpace/$_.Capacity*100)};Label="Free%";Width=11;Align="Right"}, - @{Expression={"{0:N0}" -f ($_.InPolicyFilesCount)};Label="Files";Width=11;Align="Right"} - } - - if ($SNVView) { - "`n[Storage Node view]" - $SNVView | sort StorageNode,StorageEnclosure | Format-Table -AutoSize @{Expression = {$_.StorageNode}; Label = "StorageNode"; Align = "Left"}, - @{Expression = {$_.StoragePool}; Label = "StoragePool"; Align = "Left"}, - @{Expression = {$_.MPIOPolicy}; Label = "MPIOPolicy"; Align = "Left"}, - @{Expression = {$_.MPIOState}; Label = "MPIOState"; Align = "Left"}, - @{Expression = {$_.PathID}; Label = "PathID"; Align = "Left"}, - @{Expression = {$_.PathState}; Label = "PathState"; Align = "Left"}, - @{Expression = {$_.PhysicalDiskUID}; Label = "PhysicalDiskUID"; Align = "Left"}, - @{Expression = {$_.StorageEnclosure}; Label = "StorageEnclosureLocation"; Align = "Left"} - } - - "`n[Capacity Report]" - "Physical disks by Enclosure, Media Type and 
Health Status, with total and unallocated space" - "Note: Sizes shown in gigabytes (GB)" - - $PDStatus = $PhysicalDisks |? EnclosureNumber -ne $null | - Sort-Object EnclosureNumber, MediaType, HealthStatus | - Group-Object EnclosureNumber, MediaType, HealthStatus | - Select-Object Count, TotalSize, Unalloc, - @{Expression={$_.Name.Split(",")[0].Trim().TrimEnd()}; Label="Enc"}, - @{Expression={$_.Name.Split(",")[1].Trim().TrimEnd()}; Label="Media"}, - @{Expression={$_.Name.Split(",")[2].Trim().TrimEnd()}; Label="Health"} - - $PDStatus |% { - $Current = $_ - $TotalSize = 0 - $Unalloc = 0 - $PDCurrent = $PhysicalDisks |? { ($_.EnclosureNumber -eq $Current.Enc) -and ($_.MediaType -eq $Current.Media) -and ($_.HealthStatus -eq $Current.Health) } - $PDCurrent |% { - $Unalloc += $_.Size - $_.AllocatedSize - $TotalSize +=$_.Size - } - - $Current.Unalloc = $Unalloc - $Current.TotalSize = $TotalSize - } - - $PDStatus | Format-Table -AutoSize Enc, Media, Health, Count, - @{Expression={"{0:N2}" -f ($_.TotalSize/$_.Count/1GB)};Label="Avg Size";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.TotalSize/1GB)};Label="Total Size";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.Unalloc/1GB)};Label="Unallocated";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f ($_.Unalloc/$_.TotalSize*100)};Label="Unalloc %";Width=11;Align="Right"} - - "Pools with health, total size and unallocated space" - "Note: Sizes shown in gigabytes (GB)" - - $StoragePools | Sort-Object FriendlyName | - Format-Table -AutoSize @{Expression={$_.FriendlyName};Label="Name"}, - @{Expression={$_.HealthStatus};Label="Health"}, - @{Expression={"{0:N2}" -f ($_.Size/1GB)};Label="Total Size";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f (($_.Size-$_.AllocatedSize)/1GB)};Label="Unallocated";Width=11;Align="Right"}, - @{Expression={"{0:N2}" -f (($_.Size-$_.AllocatedSize)/$_.Size*100)};Label="Unalloc%";Width=11;Align="Right"} - } - - # - # Phase 3 - # - Show-Update "<<< Phase 3 - Storage Performance >>>" -ForegroundColor Cyan - - if (-not $IncludePerformance) { - - "Performance was excluded by a parameter`n" - - } else { - - Show-Update "Get counter sets" - $set = Get-Counter -ListSet "Cluster Storage*","Cluster CSV*","Storage Spaces*","Refs","Cluster Disk Counters","PhysicalDisk" -ComputerName $ClusterNodes.Name - Show-Update "Start monitoring ($($PerfSamples)s)" - $PerfRaw = Get-Counter -Counter $set.Paths -SampleInterval 1 -MaxSamples $PerfSamples -ErrorAction Ignore -WarningAction Ignore - Show-Update "Exporting counters" - $PerfRaw | Export-counter -Path ($Path + "GetCounters.blg") -Force -FileFormat BLG - Show-Update "Completed" - - if ($ProcessCounter) { - - "Collected $PerfSamples seconds of raw performance counters. 
Processing...`n" - $Count1 = 0 - $Total1 = $PerfRaw.Count - - if ($Total1 -gt 0) { - - $PerfDetail = $PerfRaw |% { - $TimeStamp = $_.TimeStamp - - $Progress = $Count1 / $Total1 * 45 - $Count1++ - Write-Progress -Activity "Processing performance samples" -PercentComplete $Progress - - $_.CounterSamples |% { - $DetailRow = "" | Select-Object Time, Pool, Owner, Node, Volume, Share, Counter, Value - $Split = $_.Path.Split("\") - $DetailRow.Time = $TimeStamp - $DetailRow.Node = $Split[2] - $DetailRow.Volume = $_.InstanceName - $DetailRow.Counter = $Split[4] - $DetailRow.Value = $_.CookedValue - $DetailRow - } - } - - Write-Progress -Activity "Processing performance samples" -PercentComplete 50 - $PerfDetail = $PerfDetail | Sort-Object Volume - - $Last = $PerfDetail.Count - 1 - $Volume = "" - - $PerfVolume = 0 .. $Last |% { - - if ($Volume -ne $PerfDetail[$_].Volume) { - $Volume = $PerfDetail[$_].Volume - $Pool = CSVToPool ($Volume) - $Owner = CSVToNode ($Volume) - $Share = CSVToShare ($Volume) - $ReadIOPS = 0 - $WriteIOPS = 0 - $ReadLatency = 0 - $WriteLatency = 0 - $NonZeroRL = 0 - $NonZeroWL = 0 - - $Progress = 55 + ($_ / $Last * 45 ) - Write-Progress -Activity "Processing performance samples" -PercentComplete $Progress - } - - $PerfDetail[$_].Pool = $Pool - $PerfDetail[$_].Owner = $Owner - $PerfDetail[$_].Share = $Share - - $Value = $PerfDetail[$_].Value - - Switch ($PerfDetail[$_].Counter) { - "reads/sec" { $ReadIOPS += $Value } - "writes/sec" { $WriteIOPS += $Value } - "read latency" { $ReadLatency += $Value; if ($Value -gt 0) {$NonZeroRL++} } - "write latency" { $WriteLatency += $Value; if ($Value -gt 0) {$NonZeroWL++} } - default { Write-Warning "Invalid counter $_" } - } - - if ($_ -eq $Last) { - $EndofVolume = $true - } else { - if ($Volume -ne $PerfDetail[$_+1].Volume) { - $EndofVolume = $true - } else { - $EndofVolume = $false - } - } - - if ($EndofVolume) { - $VolumeRow = "" | Select-Object Pool, Volume, Share, ReadIOPS, WriteIOPS, TotalIOPS, ReadLatency, WriteLatency, TotalLatency - $VolumeRow.Pool = $Pool - $VolumeRow.Volume = $Volume - $VolumeRow.Share = $Share - $VolumeRow.ReadIOPS = [int] ($ReadIOPS / $PerfSamples * 10) / 10 - $VolumeRow.WriteIOPS = [int] ($WriteIOPS / $PerfSamples * 10) / 10 - $VolumeRow.TotalIOPS = $VolumeRow.ReadIOPS + $VolumeRow.WriteIOPS - if ($NonZeroRL -eq 0) {$NonZeroRL = 1} - $VolumeRow.ReadLatency = [int] ($ReadLatency / $NonZeroRL * 1000000 ) / 1000 - if ($NonZeroWL -eq 0) {$NonZeroWL = 1} - $VolumeRow.WriteLatency = [int] ($WriteLatency / $NonZeroWL * 1000000 ) / 1000 - $VolumeRow.TotalLatency = [int] (($ReadLatency + $WriteLatency) / ($NonZeroRL + $NonZeroWL) * 1000000) / 1000 - $VolumeRow - } - } - - } else { - Show-Warning "Unable to collect performance information" - $PerfVolume = @() - $PerfDetail = @() - } - - $PerfVolume | Export-Clixml ($Path + "GetVolumePerf.XML") - $PerfDetail | Export-Csv ($Path + "VolumePerformanceDetails.TXT") - } - } - - if ($S2DEnabled -ne $true) { - - try { - if ((([System.Environment]::OSVersion.Version).Major) -ge 10) { - Show-Update "Gathering Get-StorageDiagnosticInfo" - $deleteStorageSubsystem = $false - if (-not (Get-StorageSubsystem -FriendlyName Clustered*)) { - $storageProviderName = (Get-StorageProvider -CimSession $ClusterName |? 
Manufacturer -match 'Microsoft').Name - $null = Register-StorageSubsystem -ProviderName $storageProviderName -ComputerName $ClusterName -ErrorAction SilentlyContinue - $deleteStorageSubsystem = $true - $storagesubsystemToDelete = Get-StorageSubsystem -FriendlyName Clustered* - } - $destinationPath = Join-Path -Path $Path -ChildPath 'StorageDiagnosticInfo' - if (Test-Path -Path $destinationPath) { - Remove-Item -Path $destinationPath -Recurse -Force - } - $null = New-Item -Path $destinationPath -ItemType Directory - $clusterSubsystem = (Get-StorageSubSystem |? Model -eq 'Clustered Windows Storage').FriendlyName - Stop-StorageDiagnosticLog -StorageSubSystemFriendlyName $clusterSubsystem -ErrorAction SilentlyContinue - if ($IncludeLiveDump) { - Get-StorageDiagnosticInfo -StorageSubSystemFriendlyName $clusterSubsystem -IncludeLiveDump -DestinationPath $destinationPath - } else { - Get-StorageDiagnosticInfo -StorageSubSystemFriendlyName $clusterSubsystem -DestinationPath $destinationPath - } - - if ($deleteStorageSubsystem) { - Unregister-StorageSubsystem -StorageSubSystemUniqueId $storagesubsystemToDelete.UniqueId -ProviderName Windows* - } - } - } - catch { - Show-Warning "Could not gather Get-StorageDiagnosticInfo (cluster down and/or shared storage)`nError = $($_)" - } - } - - Show-Update "GATHERS COMPLETE ($(((Get-Date) - $TodayDate).ToString("m'm's\.f's'")))" -ForegroundColor Green - - } finally { - Stop-Transcript - } - - # Generate Summary report for rapid consumption at analysis time - Show-Update "<<< Generating Summary Report >>>" -ForegroundColor Cyan - $transcriptFile = $Path + "0_CloudHealthSummary.log" - Start-Transcript -Path $transcriptFile -Force - try { - Show-SddcDiagnosticReport -Report Summary -ReportLevel Full $Path - } finally { - Stop-Transcript - } - - # - # Phase 4 - # - - Show-Update "<<< Phase 4 - Compacting files for transport >>>" -ForegroundColor Cyan - - # - # Force GC so that any pending file references are - # torn down. If they live, they will block removal - # of content. - # - - [System.GC]::Collect() - - # time/extension suffix - $ZipSuffix = '-' + (Format-SddcDateTime $TodayDate) + '.ZIP' - - # prepend clustername if live, domain name trimmed away - # we could use $Cluster.Name since it will exist if $ClusterName was created from it, - # but that may seem excessively mysterious) - if ($ClusterName.Length) { - $ZipSuffix = '-' + ($ClusterName.Split('.',2)[0]) + $ZipSuffix - } else { - $ZipSuffix = '-OFFLINECLUSTER' + $ZipSuffix - } - - # ... and full path - $ZipPath = $ZipPrefix + $ZipSuffix - - try { - Add-Type -Assembly System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::CreateFromDirectory($Path, $ZipPath, [System.IO.Compression.CompressionLevel]::Optimal, $false) - $ZipPath = Convert-Path $ZipPath - Show-Update "Zip File Name : $ZipPath" - - Show-Update "Cleaning up temporary directory $Path" - Remove-Item -Path $Path -ErrorAction SilentlyContinue -Recurse - - } catch { - Show-Error("Error creating the ZIP file!`nContent remains available at $Path") - } - - Show-Update "Cleaning up CimSessions" - Get-CimSession | Remove-CimSession - - Show-Update "COMPLETE ($(((Get-Date) - $TodayDate).ToString("m'm's\.f's'")))" -ForegroundColor Green -} - -####### -####### -####### -## -# Archive Job Management -## -####### -####### -####### - -<# -.SYNOPSIS - Install the Sddc Diagnostic Module (PrivateCloud.DiagnosticInfo) on the target nodes. - -.DESCRIPTION - Install the Sddc Diagnostic Module (PrivateCloud.DiagnosticInfo) on the target nodes. 
- - This is done by pushing the current version of the module from the local system to the targets, - not by downloading from a remote location. - -.PARAMETER Cluster - Specifies the cluster to push to. All nodes will receive the module. - -.PARAMETER Node - Specifies the nodes to push to, directly. - -.PARAMETER Force - Forces (re)installation even if the target nodes have the same version as the source. - -.EXAMPLE - Install-SddcDiagnosticModule - - Install the module to all nodes of the current system's cluster. - -.EXAMPLE - Install-SddcDiagnosticModule -Cluster Cluster1 - - Install the module to all nodes of the Cluster1 cluster. - -.EXAMPLE - Install-SddcDiagnosticModule -Node Node1,Node2 - - Install the module to the specified nodes. -#> - -function Install-SddcDiagnosticModule -{ - [CmdletBinding( DefaultParameterSetName = "Cluster" )] - param( - [parameter(ParameterSetName="Cluster", Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.', - - [parameter(ParameterSetName="Node", Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string[]] $Node, - - [parameter(ParameterSetName="Cluster", Mandatory=$false)] - [parameter(ParameterSetName="Node", Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [switch] $Force - ) - - switch ($psCmdlet.ParameterSetName) { - "Cluster" { - $Nodes = Get-NodeList -Cluster $Cluster -Filter - } - "Node" { - $Nodes = Get-NodeList -Nodes $Node -Filter - } - } - - # remove the local node if present (self-update) - $Nodes = $Nodes |? { $_ -ne $env:COMPUTERNAME } - - $thisModule = Get-Module $Module -ErrorAction Stop - - $clusterModules = icm $Nodes.Name { - $null = Import-Module -Force $using:Module -ErrorAction SilentlyContinue - Get-Module $using:Module - } - - # build list of nodes which need installation/refresh - $installNodes = @() - $updateNodes = @() - - # start with nodes which lack the module - $Nodes.Name |? { $_ -notin $clusterModules.PsComputerName } |% { $installNodes += $_ } - # now add nodes which are downlevel (or, forced, the same apparent version) - $clusterModules |? { $thisModule.Version -gt $_.Version -or ($Force -and $thisModule.Version -eq $_.Version) } |% { $updateNodes += $_.PsComputerName } - - # warn nodes which are uplevel - $clusterModules |? { $thisModule.Version -lt $_.Version } |% { - Write-Warning "Node $($_.PsComputerName) has an newer version of the $Module module ($($_.Version) > $($thisModule.Version)). Consider installing the updated module on the local system ($env:COMPUTERNAME) and updating the cluster." - } - - if ($installNodes.Count) { Write-Host "New Install to Nodes: $(($installNodes | sort) -join ',')" } - if ($updateNodes.Count) { Write-Host "Update for Nodes : $(($updateNodes | sort) -join ',')" } - - # begin gathering remote install locations - # clean outdated installations if present - - $installPaths = @() - - if ($installNodes.Count -gt 0) { - $installPaths += icm $installNodes { - - # import common functions - . ([scriptblock]::Create($using:CommonFunc)) - - # place in the Install-Module default location - # note we must specify all the way to final destination since we know it does not exist - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME (Join-Path "$env:ProgramFiles\WindowsPowerShell\Modules\$using:Module" $using:thisModule.Version)) - } - } - - if ($updateNodes.Count -gt 0) { - $installPaths += icm $updateNodes { - - # import common functions - . 
([scriptblock]::Create($using:CommonFunc)) - - # wipe outdated install location - Install-Module does not place here, prefer its location - if (Test-Path $env:SystemRoot\System32\WindowsPowerShell\v1.0\Modules\$using:Module) { - - rm -Recurse $env:SystemRoot\System32\WindowsPowerShell\v1.0\Modules\$using:Module -ErrorAction Stop - - # place in the Install-Module default location - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME (Join-Path "$env:ProgramFiles\WindowsPowerShell\Modules\$using:Module" $using:thisModule.Version)) - - } else { - - $null = Import-Module $using:Module -Force - $m = Get-Module $using:module -ErrorAction Stop - - # unload current and return its location for update - $md = (gi (gi $m.ModuleBase -ErrorAction SilentlyContinue).PsParentPath).FullName - Remove-Module $using:module -ErrorAction SilentlyContinue - - # note we return the parent path - the copy will place the versioned module directory within it - Write-Output (Get-AdminSharePathFromLocal $env:COMPUTERNAME $md) - } - } - } - - # and propagate to the given locations - $installPaths |% { - cp -Recurse $thisModule.ModuleBase $_ -Force -ErrorAction Stop - } -} - -<# -.SYNOPSIS - Confirm versioning of the Sddc Diagnostic module (PrivateCloud.DiagnosticInfo) on the target - nodes. - -.DESCRIPTION - Confirm versioning of the Sddc Diagnostic module (PrivateCloud.DiagnosticInfo) on the target - nodes. - - Warnings will be generated for nodes which do not have the module or have versions different - from the one on the local system. Use Install-SddcDiagnosticModule to push updates. - -.PARAMETER Cluster - Specifies the cluster. All nodes will be validated. - -.PARAMETER Node - Specifies the nodes to validate directly. - -.EXAMPLE - Confirm-SddcDiagnosticModule - - Validate versions installed across the cluster the local system is a member of. - -.EXAMPLE - Confirm-SddcDiagnosticModule -Cluster Cluster1 - - Validate versions installed across the Cluster1 cluster. -#> - -function Confirm-SddcDiagnosticModule -{ - [CmdletBinding()] - param( - [parameter(ParameterSetName="Cluster", Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.', - - [parameter(ParameterSetName="Node", Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string[]] $Node - ) - - switch ($psCmdlet.ParameterSetName) { - "Cluster" { - $Nodes = Get-NodeList -Cluster $Cluster -Filter - } - "Node" { - $Nodes = Get-NodeList -Nodes $Node -Filter - } - } - - $thisModule = Get-Module $Module -ErrorAction Stop - - $clusterModules = icm $Nodes.Name { - $null = Import-Module -Force $using:Module -ErrorAction SilentlyContinue - Get-Module $using:Module - } - - $Nodes.Name |? { $_ -notin $clusterModules.PsComputerName } |% { - Write-Warning "Node $_ does not have the $Module module. Please 'Install-SddcDiagnosticModule -Node $_' to address." - } - $clusterModules |? { $thisModule.Version -gt $_.Version } |% { - Write-Warning "Node $($_.PsComputerName) has an older version of the $Module module ($($_.Version) < $($thisModule.Version)). Please 'Install-SddcDiagnosticModule -Node $_' to address." - } - $clusterModules |? { $thisModule.Version -lt $_.Version } |% { - Write-Warning "Node $($_.PsComputerName) has an newer version of the $Module module ($($_.Version) > $($thisModule.Version)). Consider installing the updated module on the local system ($env:COMPUTERNAME) and updating the cluster." - } - - $clusterModules -} - -<# -.SYNOPSIS - Perform garbage collection on the local node's Sddc Diagnostic Archive. 
- -.DESCRIPTION - Perform garbage collection on the local node's Sddc Diagnostic Archive. - - This is an INTERNAL utililty command, used by the clustered scheduled task which performs the - Sddc Diagnostic Archive. It is not intended for direct use. - -.PARAMETER ArchivePath - Specifies the path to the archive to garbage collect. - -.EXAMPLE - Limit-SddcDiagnosticArchive -ArchivePath C:\Windows\SddcDiagnosticArchive - - Perform garbage collection on the content of the specified directory. -#> - -function Limit-SddcDiagnosticArchive -{ - param( - [parameter(Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] $ArchivePath - ) - - $Days = $null - $Size = $null - Get-SddcDiagnosticArchiveJobParameters -Days ([ref] $Days) -Size ([ref] $Size) - - Show-Update "Applying limits to SDDC Archive @ $ArchivePath : $Days Days & $('{0:0.00} MiB' -f ($Size/1MB))" - - # - # Comment/get current state - # - - # note: default sort is ascending, so by our lexically sortable naming convention - # the oldest ZIPs will come first - $f = @(dir $ArchivePath\*.ZIP) | sort - $m = $f | measure -Sum Length - - Show-Update "Begin: $($m.Count) ZIPs which are $('{0:0.00} MiB' -f ($m.Sum/1MB))" - - # - # Day limit - # - - if ($f.Count -gt $Days) { - $ndelete = $f.Count - $Days - Show-Update "Deleting $ndelete days of archive" - - $f[0..($ndelete - 1)] |% { - Show-Update "`tDay limit: Deleting $($_.FullName)" - $_ - } | del -Force - - # re-measure the remaining - $f = $f[$ndelete..$($f.Count - 1)] - $m = $f | measure -Sum Length - } - - # - # Size limit - # - - if ($m.Sum -gt $Size) { - - Show-Update "Deleting $('{0:0.00} MiB' -f ($($m.Sum-$Size)/1MB)) MiB of archive" - - foreach ($file in $f) { - - Show-Update "`tSize limit: Deleting $($file.FullName)" - $m.Sum -= $file.Length - del $file.Fullname -Force - - if ($m.Sum -le $Size) { - break - } - } - } - - # - # Comment final state - # - - $f = @(dir $ArchivePath\*.ZIP) | sort - $m = $f | measure -Sum Length - - Show-Update "End: $($m.Count) ZIPs which are $('{0:0.00} MiB' -f ($m.Sum/1MB))" -} - -<# -.SYNOPSIS - Perform a new capture to the local node's Sddc Diagnostic Archive. - -.DESCRIPTION - Perform a new capture to the local node's Sddc Diagnostic Archive. - - This is an INTERNAL utililty command, used by the clustered scheduled task which performs the - Sddc Diagnostic Archive. It is not intended for direct use. - -.PARAMETER ArchivePath - Specifies the path to the archive. - -.EXAMPLE - Update-SddcDiagnosticArchive -ArchivePath C:\Windows\SddcDiagnosticArchive - - Capture content to the specified directory. 
-#> - -function Update-SddcDiagnosticArchive -{ - param( - [parameter(Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] $ArchivePath - ) - - # get timestamp at the top, reflecting job launch time - $TimeStamp = Get-Date - - # Scrub in just in case - $CapturePath = (Join-Path $ArchivePath "Capture") - rm -r $CapturePath -Force -ErrorAction SilentlyContinue - $null = mkdir $CapturePath -Force -ErrorAction Stop - - # - # Capture - # - - # 25 hour capture of events - Get-SddcCapturedEvents $CapturePath 25 |% { - Show-Update "Captured: $_" - } - - # 25 hour capture of cluster/health logs - try { - - if ($c = Get-Cluster) { - - $f = Get-ClusterLog -Node $env:COMPUTERNAME -Destination $CapturePath -UseLocalTime -TimeSpan (25 * 60) - Show-Update "Captured: $($f.FullName)" - if ($c.S2DEnabled) { - $f = Get-ClusterLog -Node $env:COMPUTERNAME -Destination $CapturePath -Health -UseLocalTime -TimeSpan (25 * 60) - Show-Update "Captured: $($f.FullName)" - } - } - } catch { - - Show-Update "Cluster/Health Logs not captured" - } - - # - # Compress - # - - $ZipFile = 'SddcDiagnosticArchive-' + $env:COMPUTERNAME + '-' + (Format-SddcDateTime ($TimeStamp)) + '.ZIP' - $ZipPath = (join-path $ArchivePath $ZipFile) - - try { - Add-Type -Assembly System.IO.Compression.FileSystem - [System.IO.Compression.ZipFile]::CreateFromDirectory($CapturePath, $ZipPath, [System.IO.Compression.CompressionLevel]::Optimal, $false) - Show-Update "Zip File Name : $ZipPath" - } catch { - Show-Error "Error creating the ZIP file!" $_ - } - - # Scrub out - rm -r $CapturePath -Force -ErrorAction SilentlyContinue -} - -<# -.SYNOPSIS - Query for Sddc Diagnostic Archive job parameters. - -.DESCRIPTION - Query for Sddc Diagnostic Archive job parameters. [ref] parameters must be specified. - - This is an INTERNAL utililty command, used by the clustered scheduled task which performs the - Sddc Diagnostic Archive. It is not intended for direct use. - - Use Show-SddcDiagnosticArchiveJob to query & show the state of the archive job on a target set - of systems. - -.PARAMETER Cluster - Specifies the cluster from which parameters should be queried. - -.PARAMETER Days - Receives the days of archive to maintain. - -.PARAMETER Path - Receives the path to the archive (valid only on local system) - -.PARAMETER Size - Receives the maximum size of the archive to maintain (bytes) - -.PARAMETER At - Receives the time of day that the archive update job is configured to run. - -.EXAMPLE - Get-SddcDiagnosticArchiveJobParameters -Days ([ref] $d) - - Receives the days of archive configured for the cluster the local system is a member of. 
-#> - -function Get-SddcDiagnosticArchiveJobParameters -{ - param( - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.', - - [parameter(Mandatory=$false)] - [ref] $Days, - - [parameter(Mandatory=$false)] - [ref] $Path, - - [parameter(Mandatory=$false)] - [ref] $Size, - - [parameter(Mandatory=$false)] - [ref] $At - ) - - $c = Get-Cluster -Name $Cluster -ErrorAction Stop - - if ($PSBoundParameters.ContainsKey('Days')) { - try { - $Days.Value = ($c | Get-ClusterParameter -Name SddcDiagnosticArchiveDays -ErrorAction Stop).Value - } catch { - $Days.Value = 60 - } - } - - if ($PSBoundParameters.ContainsKey('Path')) { - try { - $Path.Value = ($c | Get-ClusterParameter -Name SddcDiagnosticArchivePath -ErrorAction Stop).Value - } catch { - $Path.Value = Join-Path $env:SystemRoot "SddcDiagnosticArchive" - } - } - - if ($PSBoundParameters.ContainsKey('Size')) { - try { - $Size.Value = ($c | Get-ClusterParameter -Name SddcDiagnosticArchiveSize -ErrorAction Stop).Value - } catch { - $Size.Value = 500MB - } - } - - if ($PSBoundParameters.ContainsKey('At')) { - try { - $Task = Get-ClusteredScheduledTask -Cluster $c.Name -TaskName SddcDiagnosticArchive -ErrorAction Stop - - # may be overaggresive, there should only be one trigger if we define it - $At.Value = [datetime] ($Task.TaskDefinition.Triggers[0].StartBoundary) - } catch { - $At.Value = [datetime] '3AM' - } - } -} - -<# -.SYNOPSIS - Set Sddc Diagnostic Archive job parameters. - -.DESCRIPTION - Set Sddc Diagnostic Archive job parameters. - - Use this command to change the default archive location and garbage collection controls (days - of archive and its maximum size). - - Use the Register-SddcDiagnosticArchiveJob to change the launch time. - -.PARAMETER Cluster - Specifies the cluster for which parameters will be set. - -.PARAMETER Days - Specifies the days of archive to maintain. This limit will be applied during the next archive - job execution. - -.PARAMETER Path - Specifies the path to create the archive at. Ensure that this path is available on all systems. - By default the archive will be placed at $env:SystemRoot\SddcDiagnosticArchive - -.PARAMETER Size - Specifies the maximum size of the archive (in bytes). This limit will be applied during the next - archive job execution. - -.EXAMPLE - Set-SddcDiagnosticArchiveJobParameters -Days 14 - - Sets the maximum days of archive to two weeks. -#> - -function Set-SddcDiagnosticArchiveJobParameters -{ - param( - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.', - - [parameter(Mandatory=$false)] - [ValidateRange(1,365)] - [int] $Days, - - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Path, - - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [uint64] $Size - ) - - $c = Get-Cluster -Name $Cluster -ErrorAction Stop - - # note: we could rewrite paths which are prefixed with recognizably $env:systemroot and other - # canonical paths with macros that we can expand at the destination node. strictly speaking these are - # not guaranteed to be identical though its extremely unlikely we'll find that condition in practice. 
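-    # These settings persist as private cluster parameters (SddcDiagnosticArchiveDays/-Path/-Size)
-    # and are read back, with defaults, by Get-SddcDiagnosticArchiveJobParameters when the archive
-    # job runs. A hedged usage sketch; the cluster name and values are illustrative only:
-    #
-    #   Set-SddcDiagnosticArchiveJobParameters -Cluster Cluster1 -Days 14 -Size 1GB -Path 'D:\SddcDiagnosticArchive'
-    #   $d = $null
-    #   Get-SddcDiagnosticArchiveJobParameters -Cluster Cluster1 -Days ([ref] $d)    # $d is now 14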
- - - if ($PSBoundParameters.ContainsKey('Days')) { - $c | Set-ClusterParameter -Name SddcDiagnosticArchiveDays -Create -Value $Days -ErrorAction Stop - } - if ($PSBoundParameters.ContainsKey('Path')) { - if ($Path[1] -ne ':') { - Write-Error 'Path must be specified as an absolute path (:\some\path)' - } else { - $c | Set-ClusterParameter -Name SddcDiagnosticArchivePath -Create -Value $Path -ErrorAction Stop - } - } - if ($PSBoundParameters.ContainsKey('Size')) { - $c | Set-ClusterParameter -Name SddcDiagnosticArchiveSize -Create -Value $Size -ErrorAction Stop - } - - # note, the scheduled start time is only modified at register time -} - -<# -.SYNOPSIS - Show the state of the Sddc Diagnostic Archive job. - -.DESCRIPTION - Show the state of the Sddc Diagnostic Archive job. - - Use this command to generate a report on the location and garbage collection parameters for the - archive on the target cluster, along with space used on each node. - -.PARAMETER Cluster - Specifies the cluster to query. - -.EXAMPLE - Show-SddcDiagnosticArchiveJob -Cluster Cluster1 - - Shows the state of the archive job on cluster Cluster1 -#> - -function Show-SddcDiagnosticArchiveJob -{ - param( - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.' - ) - - $c = Get-Cluster -Name $Cluster -ErrorAction Stop - - # continue if present, else error - if (-not (Get-ClusteredScheduledTask -Cluster $c.Name |? TaskName -eq SddcDiagnosticArchive)) { - Show-Error "SddcDiagnosticArchive job not currently registered" - } - - $Days = $null - $Path = $null - $Size = $null - $At = $null - - Get-SddcDiagnosticArchiveJobParameters -Cluster $c.Name -Days ([ref] $Days) -Path ([ref] $Path) -Size ([ref] $Size) -At ([ref] $At) - - Write-Output "Target archive size per node : $('{0:0.00} MiB' -f ($Size/1MB))" - Write-Output "Target days of archive : $Days" - Write-Output "Capture to path : $Path" - Write-Output "Capture at : $($At.ToString("h:mm tt"))" - - $Nodes = Get-NodeList -Cluster $Cluster -Filter - - Write-Output "$('-'*20)`nPer Node Report" - $j = $Nodes | sort Name |% { - icm $_.Name -AsJob { - - Import-Module $using:Module -ErrorAction SilentlyContinue - - # import common functions - . ([scriptblock]::Create($using:CommonFunc)) - - if (Test-SddcModulePresence) { - - $Path = $null - Get-SddcDiagnosticArchiveJobParameters -Path ([ref] $Path) - - dir $Path\*.ZIP -ErrorAction SilentlyContinue | measure -Sum Length - } - } - } - - $null = $j | Wait-Job - $j | sort Location |% { - - $m = Receive-Job $_ - Remove-Job $_ - - # note we will not have a measurement if the remote node lacks the module - # a warning will have already been passed to the output in this case - if ($m) { - Write-Output "Node $($_.Location): $($m.Count) ZIPs which are $('{0:0.00} MiB' -f ($m.Sum/1MB))" - } - } -} - -<# -.SYNOPSIS - Unregister (remove) the Sddc Diagnostic Archive job. - -.DESCRIPTION - Unregister (remove) the Sddc Diagnostic Archive job. - - This removes all configured parameters and the Sddc Diagnostic Archive clustered scheduled task. - It does not remove the Sddc Diagnostic Archives themselves. - -.PARAMETER Cluster - Specifies the target cluster. - -.EXAMPLE - Unregister-SddcDiagnosticArchiveJob -Cluster Cluster1 - - Removes the Sddc Diagnostic Archive job from cluster Cluster1 -#> - -function Unregister-SddcDiagnosticArchiveJob -{ - param( - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.' 
- ) - - $c = Get-Cluster -Name $Cluster -ErrorAction Stop - - # silently delete parameters, if set away from defaults - $c | Set-ClusterParameter -Name SddcDiagnosticArchiveDays -Delete -ErrorAction SilentlyContinue - $c | Set-ClusterParameter -Name SddcDiagnosticArchivePath -Delete -ErrorAction SilentlyContinue - $c | Set-ClusterParameter -Name SddcDiagnosticArchiveSize -Delete -ErrorAction SilentlyContinue - - # unregister if present, else error - if (Get-ClusteredScheduledTask -Cluster $c.Name |? TaskName -eq SddcDiagnosticArchive) { - Unregister-ClusteredScheduledTask -Cluster $c.Name -TaskName SddcDiagnosticArchive -ErrorAction Stop - } else { - Show-Error "SddcDiagnosticArchive job not currently registered" - } -} - -<# -.SYNOPSIS - Register the Sddc Diagnostic Archive job. - -.DESCRIPTION - Register the Sddc Diagnostic Archive job. - - This creates the Sddc Diagnostic Archive clustered scheduled task on the target cluster. Use - Set-SddcDiagnosticArchiveJobParameters to change the default location and garbage collection - options. Use Show-SddcDiagnosticArchiveJob to verify the state of the job and its parameters. - - Re-registering can be used to change the start time for the job. This does not affect other - configured parameters, and does not create an additional instance of the job. - -.PARAMETER Cluster - Specifies the target cluster. - -.PARAMETER At - Specifies the time to launch the job (1/day). - -.EXAMPLE - Register-SddcDiagnosticArchiveJob -Cluster Cluster1 - - Creates the Sddc Diagnostic Archive job on cluster Cluster1 with default location and garbage - collection parameters. - -.EXAMPLE - Register-SddcDiagnosticArchiveJob -At 4:30AM - - Creates the Sddc Diagnostic Archive job, launching at 4:30AM each morning. -#> - -function Register-SddcDiagnosticArchiveJob -{ - param( - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [string] $Cluster = '.', - - [parameter(Mandatory=$false)] - [ValidateNotNullOrEmpty()] - [datetime] $At = '3AM' - ) - - $c = Get-Cluster -Name $Cluster -ErrorAction Stop - - # the scheduled task script itself - $scr = { - $Module = 'PrivateCloud.DiagnosticInfo' - Import-Module $Module - - $Path = (Get-Cluster -Name . -ErrorAction Stop | Get-ClusterParameter -Name SddcDiagnosticArchivePath -ErrorAction Stop).Value - $null = mkdir -Force $Path -ErrorAction SilentlyContinue - - $LogFile = Join-Path $Path "SddcDiagnosticArchive.log" - - # trim log - $ntail = $null - $limit = 10MB - if (($l = gi $LogFile -ErrorAction SilentlyContinue) -and - $l.Length -gt $limit) { - - $LogFileTmp = Join-Path $Path "SddcDiagnosticArchive.log.tmp" - - # note: transcripts are produced in plain ASCII - # estimate the #lines in the tail of the file which ~10MB allows for - $ntail = [int] ((gc $LogFile | measure).Count * ($limit/$l.length)) - gc $LogFile -Tail $ntail | Out-File -Encoding ascii -Width 9999 $LogFileTmp - del $LogFile - move $LogFileTmp $LogFile - } - - Start-Transcript -Path $LogFile -Append - - if ($ntail) { - Write-Output "Truncated $LogFile to $ntail lines ($('{0:0.00} MiB' -f ($limit/1MB)) limit)" - } - - if (-not (Get-Module $Module)) { - Write-Output "Module $Module not installed - exiting, cannot capture" - } else { - - try { - Update-SddcDiagnosticArchive $Path - Limit-SddcDiagnosticArchive $Path - } catch { - Write-Error "$(Get-Date -format 's') : SDDC Diagnostic Archive job failed." 
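            # rethrow so the failure propagates out of the scheduled run rather than being
            # swallowed; the Write-Error above leaves the timestamped failure in the transcript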
- throw $_ - } - } - - Stop-Transcript - } - - # use the encoded form to mitigate quoting complications that full scriptblock transfer exposes - $encscr = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes("& { $scr }")) - $arg = "-NoProfile -NoLogo -NonInteractive -WindowStyle Hidden -ExecutionPolicy Bypass -EncodedCommand $encscr" - - $action = New-ScheduledTaskAction -Execute "powershell.exe" -Argument $arg - $trigger = New-ScheduledTaskTrigger -Daily -At $At - - Unregister-ClusteredScheduledTask -Cluster $c.Name -TaskName SddcDiagnosticArchive -ErrorAction SilentlyContinue - Register-ClusteredScheduledTask -Cluster $c.Name -Action $action -Trigger $trigger -TaskName SddcDiagnosticArchive -TaskType ClusterWide -Description "Get-SddcDiagnosticInfo Periodic Diagnostic Archive Task" -} - -function Show-StorageCounters -{ - Param ( - [parameter(Position=0, Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] $Path, - [Parameter (Mandatory = $false)] - [bool] $showerr = $false, - [Parameter (Mandatory = $false)] - [int] $delta = 0 - ) - - if (-not (Test-Path $Path)) { - Write-Error "Path is not accessible. Please check and try again: $Path" - return - } - - $d=import-counter -Path $path\"GetCounters.blg" - - $tabName="Cache Perf" - $cachetable=new-object System.Data.DataTable "$tabName" - - #Define Columns - $col1 = New-Object system.Data.DataColumn Node,([string]) - $col2 = New-Object system.Data.DataColumn CacheHits,([string]) - $col3 = New-Object system.Data.DataColumn CacheMiss,([string]) - $col4 = New-Object system.Data.DataColumn DiskReads,([string]) - $col5 = New-Object system.Data.DataColumn DirectReads,([string]) - - $col6 = New-Object system.Data.DataColumn DiskWrites,([string]) - $col7 = New-Object system.Data.DataColumn DirectWrites,([string]) - $col8 = New-Object system.Data.DataColumn CacheWrites,([string]) - - $tabName="Error Table" - $errtable=new-object System.Data.DataTable "$tabName" - - $col9 = New-Object system.Data.DataColumn Node,([string]) - $col10 = New-Object system.Data.DataColumn WriteError,([string]) - $col11 = New-Object system.Data.DataColumn WriteMedia,([string]) - $col12 = New-Object system.Data.DataColumn ReadTimeout,([string]) - $col13 = New-Object system.Data.DataColumn ReadMedia,([string]) - - #Add the Columns - $cachetable.columns.add($col1) - $cachetable.columns.add($col2) - $cachetable.columns.add($col3) - $cachetable.columns.add($col4) - $cachetable.columns.add($col5) - $cachetable.columns.add($col6) - $cachetable.columns.add($col7) - $cachetable.columns.add($col8) - - $errtable.columns.add($col9) - $errtable.columns.add($col10) - $errtable.columns.add($col11) - $errtable.columns.add($col12) - $errtable.columns.add($col13) - - - $tabName="CSV Clusport Perf" - $table=new-object System.Data.DataTable "$tabName" - - #Define Columns - $col1 = New-Object system.Data.DataColumn Node,([string]) - $col2 = New-Object system.Data.DataColumn CSVReadIOPS,([string]) - $col3 = New-Object system.Data.DataColumn CSVReadLatency,([string]) - $col4 = New-Object system.Data.DataColumn CSVWriteIOPS,([string]) - $col5 = New-Object system.Data.DataColumn CSVWriteLatency,([string]) - - $tabName="SBL Perf" - $sbltable=new-object System.Data.DataTable "$tabName" - $col6 = New-Object system.Data.DataColumn Node,([string]) - $col7 = New-Object system.Data.DataColumn SBLReadIOPS,([string]) - $col8 = New-Object system.Data.DataColumn SBLReadLatency,([string]) - $col9 = New-Object system.Data.DataColumn SBLWriteIOPS,([string]) - $col10 = New-Object 
system.Data.DataColumn SBLWriteLatency,([string]) - $col11 = New-Object system.Data.DataColumn SBLLocalRead,([string]) - $col12 = New-Object system.Data.DataColumn SBLLocalWrite,([string]) - $col13 = New-Object system.Data.DataColumn SBLRemoteRead,([string]) - $col14 = New-Object system.Data.DataColumn SBLRemoteWrite,([string]) - - #Add the Columns - $table.columns.add($col1) - $table.columns.add($col2) - $table.columns.add($col3) - $table.columns.add($col4) - $table.columns.add($col5) - - $sbltable.columns.add($col6) - $sbltable.columns.add($col7) - $sbltable.columns.add($col8) - $sbltable.columns.add($col9) - $sbltable.columns.add($col10) - $sbltable.columns.add($col11) - $sbltable.columns.add($col12) - $sbltable.columns.add($col13) - $sbltable.columns.add($col14) - - $tabName="Hybrid IO Profile" - $ioprofiletable=new-object System.Data.DataTable "$tabName" - - #Define Columns - $col1 = New-Object system.Data.DataColumn Node,([string]) - $col2 = New-Object system.Data.DataColumn IOProfileRead,([string]) - $col3 = New-Object system.Data.DataColumn IOProfileWrites,([string]) - - $ioprofiletable.columns.add($col1) - $ioprofiletable.columns.add($col2) - $ioprofiletable.columns.add($col3) - - if ($delta -ne 0) { - $sample = $delta - } else { - $sample=0 - } - - do { - - $csvreads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster csvfs(_total)\reads/sec*"} - $csvwrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster csvfs(_total)\writes/sec*"} - $csvwritelat=$d[$sample].CounterSamples | where { $_.path -Like "*cluster csvfs(_total)\avg. sec/write"} - $csvreadlat=$d[$sample].CounterSamples | where { $_.path -Like "*cluster csvfs(_total)\avg. sec/read"} - $csvnodes=$csvreads.Path - - $sblreads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\read/sec*"} - $sblwrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\writes/sec*"} - $sbllocalreads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\Local: read/sec*"} - $sbllocalwrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\Local: writes/sec*"} - $sblremotereads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\Remote: read/sec*"} - $sblremotewrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\Remote: writes/sec*"} - $sblwritelat=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\write latency"} - $sblreadlat=$d[$sample].CounterSamples | where { $_.path -Like "*cluster disk counters(_total)\read latency"} - $sblnodes=$sblreads.Path - - $cachehits=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\cache hit reads/sec*"} - $cachemiss=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\cache miss reads/sec*"} - $diskreads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\disk reads/sec"} - $directreads=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\direct reads/sec"} - - $diskwrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\disk writes/sec*"} - $directwrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\direct writes/sec*"} - $cachewrites=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\cache 
writes/sec"} - - $writeerror=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\write errors total*"} - $writemedia=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\write errors media*"} - $readtimeout=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\read errors timeout*"} - $readmedia=$d[$sample].CounterSamples | where { $_.path -Like "*cluster storage hybrid disks(_total)\read errors media*"} - $cachenodes=$cachehits.Path - - - $diskioreads=$d[$sample].CounterSamples | where { $_.path -like "*cluster storage hybrid disks io profile(_total)\reads/sec total*" } - $diskiowrites=$d[$sample].CounterSamples | where { $_.path -like "*cluster storage hybrid disks io profile(_total)\writes/sec total*" } - $ioprofilenodes = $diskioreads.Path - - $csvreadtotal=0 - $csvwritetotal=0 - $sblreadtotal=0 - $sblwritetotal=0 - $ioprofilereadtotal=0 - $ioprofilewritetotal=0 - - - $cachehittotal =0 - $cachemisstotal=0 - $diskreadtotal=0 - $diskwritetotal=0 - $directreadtotal=0 - $directwritetotal=0 - $cachewritetotal=0 - - $table.Clear() - $cachetable.Clear() - $errtable.Clear() - $sbltable.Clear() - $ioprofiletable.Clear() - - $index=0 - foreach($node in $csvnodes) { - $row = $table.NewRow() - - $pos = $csvnodes[$index].IndexOf("\",2) - #Enter data in the row - $row.Node = $csvnodes[$index].Substring(2,$pos-2) - $row.CSVReadIOPS = $([math]::Round($csvreads[$index].cookedValue,0)) - $row.CSVReadLatency = $([math]::Round($csvreadlat[$index].cookedValue*1000,2)) - $row.CSVWriteIOPS = $([math]::Round($csvwrites[$index].cookedValue,0)) - $row.CSVWriteLatency = $([math]::Round($csvwritelat[$index].cookedValue*1000,2)) - $csvreadtotal += $row.CSVReadIOPS - $csvwritetotal+= $row.CSVWriteIOPS - $table.Rows.Add($row) - $index+=1 - } - - $index=0 - foreach($node in $sblnodes) { - $row = $sbltable.NewRow() - $pos = $sblnodes[$index].IndexOf("\",2) - $row.Node = $sblnodes[$index].Substring(2,$pos-2) - $row.SBLReadIOPS = $([math]::Round($sblreads[$index].cookedValue,0)) - $row.SBLReadLatency = $([math]::Round($sblreadlat[$index].cookedValue*1000,2)) - $row.SBLWriteIOPS = $([math]::Round($sblwrites[$index].cookedValue,0)) - $row.SBLWriteLatency = $([math]::Round($sblwritelat[$index].cookedValue*1000,2)) - $row.SBLLocalRead = $([math]::Round($sbllocalreads[$index].cookedValue,0)) - $row.SBLLocalWrite = $([math]::Round($sbllocalwrites[$index].cookedValue,0)) - $row.SBLRemoteRead = $([math]::Round($sblremotereads[$index].cookedValue,0)) - $row.SBLRemoteWrite = $([math]::Round($sblremotewrites[$index].cookedValue,0)) - - $sblreadtotal+=$row.SBLReadIOPS - $sblwritetotal+=$row.SBLWriteIOPS - - #Add the row to the table - $sbltable.Rows.Add($row) - $index+=1 - } - - $index=0 - foreach($node in $cachenodes) { - $row = $cachetable.NewRow(); - $pos = $cachenodes[$index].IndexOf("\",2) - $row.Node = $cachenodes[$index].Substring(2,$pos-2) - - $row.CacheHits = $([math]::Round($cachehits[$index].cookedValue,0)) - $row.CacheMiss = $([math]::Round($cachemiss[$index].cookedValue,0)) - $row.DiskReads = $([math]::Round($diskreads[$index].cookedValue,0)) - $row.DirectReads = $([math]::Round($directreads[$index].cookedValue,0)) - $row.DiskWrites = $([math]::Round($diskwrites[$index].cookedValue,0)) - $row.DirectWrites = $([math]::Round($directwrites[$index].cookedValue,0)) - $row.CacheWrites = $([math]::Round($cachewrites[$index].cookedValue,0)) - #Add the row to the table - $cachetable.Rows.Add($row) - - 
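            # accumulate this node's values into the running totals used for the "Total" row added after the loop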
$cachehittotal+=$row.CacheHits - $cachemisstotal+=$row.CacheMiss - $diskreadtotal+=$row.DiskReads - $diskwritetotal+=$row.DiskWrites - $directreadtotal+=$row.DirectReads - $directwritetotal+=$row.DirectWrites - $cachewritetotal+=$row.CacheWrites - - if ($showerr) { - $row = $errtable.NewRow(); - $row.Node = $nodes[$index].Substring(2,$pos-2) - $row.WriteError = $([math]::Round($writeerror[$index].cookedValue,0)) - $row.WriteMedia = $([math]::Round($writemedia[$index].cookedValue,0)) - $row.ReadTimeout = $([math]::Round($readtimeout[$index].cookedValue,0)) - $row.ReadMedia = $([math]::Round($readmedia[$index].cookedValue,0)) - - #Add the row to the table - $errtable.Rows.Add($row) - } - - $index+=1 - } - - $index=0 - foreach($node in $ioprofilenodes) { - $row = $ioprofiletable.NewRow() - $pos = $ioprofilenodes[$index].IndexOf("\",2) - $row.Node = $ioprofilenodes[$index].Substring(2,$pos-2) - $row.IOProfileRead = $([math]::Round($diskioreads[$index].cookedValue,0)) - $row.IOProfileWrites = $([math]::Round($diskiowrites[$index].cookedValue,0)) - - $ioprofilereadtotal+=$row.IOProfileRead - $ioprofilewritetotal+=$row.IOProfileWrites - - #Add the row to the table - $ioprofiletable.Rows.Add($row) - $index+=1 - } - - # add Total row - $row = $table.NewRow() - $row.Node = "Total" - $row.CSVReadIOPS = $csvreadtotal - $row.CSVWriteIOPS = $csvwritetotal - #Add the row to the table - $table.Rows.Add($row) - - $row = $sbltable.NewRow() - $row.Node = "Total" - $row.SBLReadIOPS = $sblreadtotal - $row.SBLWriteIOPS= $sblwritetotal - - #Add the row to the table - $sbltable.Rows.Add($row) - - - $row = $cachetable.NewRow() - $row.Node = "Total" - $row.CacheHits = $cachehittotal - $row.CacheMiss = $cachemisstotal - $row.DiskReads = $diskreadtotal - $row.DirectReads = $directreadtotal - $row.DiskWrites = $diskwritetotal - $row.DirectWrites= $directwritetotal - $row.CacheWrites = $cachewritetotal - #Add the row to the table - $cachetable.Rows.Add($row) - - $row = $ioprofiletable.NewRow() - $row.Node = "Total" - $row.IOProfileRead = $ioprofilereadtotal - $row.IOProfileWrites= $ioprofilewritetotal - - #Add the row to the table - $ioprofiletable.Rows.Add($row) - - cls - - #Display the table - write-host "Sample interval " $sample - $table | sort-object Node| format-table -AutoSize - $sbltable | sort-object Node| format-table -AutoSize - $cachetable | sort-object Node| format-table -AutoSize - $ioprofiletable | sort-object Node| format-table -AutoSize - - if ($showerr) { - $errtable | sort-object Node| format-table -AutoSize - } - - $sample+=1 - if ($sample -eq $d.Count) { - $sample=0 - } - - if ($delta -ne 0) { - break - } - - Start-Sleep -Seconds 1 - - } while (1) -} - -function Get-SpacesTimeline - -{ - - # aliases usage in this module is idiomatic, only using defaults - - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - - param( - - [parameter(Position=0, Mandatory=$true)] - - [ValidateNotNullOrEmpty()] - - [string] - - $Path, - - [parameter(Mandatory=$true)] - - [ValidateNotNullOrEmpty()] - - [string] - - $VirtualDiskId - - ) - - $VirtualDiskFilePath = Join-Path $Path "GetVirtualDisk.XML" - $ClusterNodeFilePath = Join-Path $Path "GetClusterNode.XML" - - if ((-not (Test-Path $VirtualDiskFilePath)) -or (-not (Test-Path $ClusterNodeFilePath))) - { - Write-Error "Path is not valid or collection files are not present. 
Please check and try again: $Path" - return - } - - $VirtualDisks = Import-ClixmlIf ($VirtualDiskFilePath) - $ClusterNodes = Import-ClixmlIf ($ClusterNodeFilePath) - - $OperationalLog = "Microsoft-Windows-StorageSpaces-Driver-Operational.EVTX" - $DiagnosticLog = "Microsoft-Windows-StorageSpaces-Driver-Diagnostic.EVTX" - - $eventshash = @{} - - foreach ($node in $ClusterNodes) - { - $nodeName = $node.Name - $OperationalLogPath = Join-Path (Get-NodePath $Path $nodeName) $OperationalLog - $DiagnosticLogPath = Join-Path (Get-NodePath $Path $nodeName) $DiagnosticLog - - foreach ($VirtualDisk in $VirtualDisks) - { - $id = $VirtualDisk.ObjectId.Split(":")[2].Split("}")[1] + "}" - - if ($VirtualDiskId -ne $id.Trim("{}")) - { - continue; - } - - $eventFilter = "EventID=1008 or EventID=1009 or EventID=1021 or EventID=1022" - - $query = "*[System[($eventFilter)]] and *[EventData[Data[@Name='Id'] and (Data='$id')]]" - - $events = Get-WinEvent -Path $DiagnosticLogPath -FilterXPath $query -ErrorAction SilentlyContinue - $events | % { $_ | Add-Member NodeName $nodeName} - - if ($events) - { - $eventshash[$id] += $events; - } - } - } - - Add-Type -AssemblyName System.Windows.Forms - Add-Type -AssemblyName System.Windows.Forms.DataVisualization - - $Title = "Storage Spaces State Timeline" - - $chart = New-object Windows.Forms.DataVisualization.Charting.Chart - $chart.Anchor = [Windows.Forms.AnchorStyles]::Bottom -bor - [Windows.Forms.AnchorStyles]::Right -bor - [Windows.Forms.AnchorStyles]::Top -bor - [Windows.Forms.AnchorStyles]::Left - - $chart.Width = 1000 - $chart.Height = 800 - $chart.Left = 40 - $chart.Top = 30 - $chart.BackColor = [Drawing.Color]::White - [void]$chart.Titles.Add($Title) - $chart.Titles[0].Font = "segoeuilight,12pt" - - # - # Create a chart area to draw on - # - - $chartArea = New-Object Windows.Forms.DataVisualization.Charting.ChartArea - $chartarea.Name = "TimeSeries" - $chartarea.AxisX.IntervalType = [Windows.Forms.DataVisualization.Charting.DateTimeIntervalType]::Hours - $chartarea.AxisX.IntervalAutoMode = [Windows.Forms.DataVisualization.Charting.IntervalAutoMode]::VariableCount - $chartarea.AxisX.MajorGrid.Enabled = $false - $chartarea.AxisX.LabelStyle.Format = "yyyy/MM/dd h tt" - $chartarea.AxisX.ScaleView.Zoomable = $true - $chartarea.AxisX.ScrollBar.IsPositionedInside = $true - $chartarea.AxisX.ScrollBar.ButtonStyle = [Windows.Forms.DataVisualization.Charting.ScrollBarButtonStyles]::All - $chartarea.CursorX.IsUserEnabled = $true - $chartarea.CursorX.IsUserSelectionEnabled = $true - $chartarea.CursorX.IntervalType = [Windows.Forms.DataVisualization.Charting.DateTimeIntervalType]::Hours - $chartarea.CursorX.AutoScroll = $true - $chartArea.AxisY.Title = "State" - $chartArea.AxisY.TitleFont = "segoeuilight,12pt" - $chartarea.AxisY.LabelStyle.Format = "N0" - $chartarea.AxisY.MinorGrid.Enabled = $true - $chartarea.AxisY.MinorGrid.LineDashStyle = [Windows.Forms.DataVisualization.Charting.ChartDashStyle]::Dot - - $chart.ChartAreas.Add($chartArea) - - foreach ($key in $eventshash.Keys) - { - if ($key.Trim("{}") -ne $VirtualDiskId) - { - continue; - } - - $eventsHashSortTime = $eventshash[$key] | sort TimeCreated - - foreach ($i in $eventsHashSortTime) - { - - $point = New-Object Windows.Forms.DataVisualization.Charting.DataPoint - $point.Color = [Drawing.Color]::Green - - if ($i.Id -eq 1008) - { - $startTime = $i.TimeCreated - $seriesName = "State" + $startTime - [void]$chart.Series.Add($seriesName) - $endTime = $null - } - if ($i.Id -eq 1009) - { - $endTime = $i.TimeCreated - 
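                # clear the start marker so the plotting code below emits only the completion point
                # (value 20, "RegenCompleted") for this 1008/1009 pair rather than another progress point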
$startTime = $null - } - - if ($i.Id -eq 1021) - { - $value = 20 - $seriesName = "Attach" + $i.TimeCreated - $point.SetValueXY($i.TimeCreated, $value) - $point.Tooltip = "Attached" + - "At: #VALX{MM/dd/yyyy h:mm:ss tt}\n" + - "NodeName: $($i.NodeName)" - $point.Color = [Drawing.Color]::Red - $chart.Series[$seriesName].Points.Add($point) - } - - if ($i.Id -eq 1021) - { - $value = 20 - $seriesName = "Detached" + $i.TimeCreated - $point.SetValueXY($i.TimeCreated, $value) - $point.Tooltip = "Detached" + - "At: #VALX{MM/dd/yyyy h:mm:ss tt}\n" + - "NodeName: $($i.NodeName)" - $point.Color = [Drawing.Color]::Red - $chart.Series[$seriesName].Points.Add($point) - } - - $chart.Series[$seriesName].ChartType = [Windows.Forms.DataVisualization.Charting.SeriesChartType]::Line - $chart.Series[$seriesName].XValueType = [Windows.Forms.DataVisualization.Charting.ChartValueType]::DateTime - $chart.Series[$seriesName].MarkerStyle = [Windows.Forms.DataVisualization.Charting.MarkerStyle]::Circle - - if ($startTime -ne $null) - { - $value = 10 - $point.SetValueXY($i.TimeCreated, $value) - $point.Tooltip = "Regen progressing" + - "At: #VALX{MM/dd/yyyy h:mm:ss tt}\n" + - "NodeName: $($i.NodeName)" - $chart.Series[$seriesName].Points.Add($point) - $startTime = $null - } - if ($endTime -ne $null) - { - $value = 20 - $point.SetValueXY($i.TimeCreated, $value) - $point.Tooltip = "RegenCompleted " + - "At: #VALX{MM/dd/yyyy h:mm:ss tt}\n" + - "NodeName: $($i.NodeName)" - $chart.Series[$seriesName].Points.Add($point) - $startTime = $null - $endTime = $null - } - } - } - - $form = New-Object Windows.Forms.Form - $form.Text = "Storage Chart plotting space timeline" - $form.Width = 1100 - $form.Height = 900 - $form.controls.add($chart) - $form.Add_Shown({$form.Activate()}) - - [void]$form.ShowDialog() - } - -####### -####### -####### -## -# Reporting -## -####### -####### -####### - -enum ReportLevelType -{ - Summary = 0 - Standard - Full -} - -# Report Types. Ordering here is reflects output ordering when multiple reports are specified. - -enum ReportType -{ - All = 0 - Summary - SmbConnectivity - StorageBusCache - StorageBusConnectivity - StorageLatency - StorageFirmware - LSIEvent -} - -# helper function to parse the csv-demarcated sections of the cluster log -# return value is a hashtable indexed by section name - -function Get-ClusterLogDataSource -{ - # aliases usage in this module is idiomatic, only using defaults - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - param( - [string] $logname - ) - - BEGIN { - $csvf = New-TemporaryFile - $sr = [System.IO.StreamReader](gi $logname).FullName - $datasource = @{} - } - - PROCESS { - - ## - # Parse cluster log for all csv datasources. Recognize by a heuristic of >4 comma-seperated values - # immediately after the block header [=== name ===] - # - # Final line to parse is the System block, which is after all potential datasources. - ## - - $firstline = $false - $in = $false - $section = $null - - do { - - $l = $sr.ReadLine() - - # Heuristic ... 
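            # a block is treated as a CSV datasource only if the first line after its [=== name ===]
            # header splits into at least 4 comma-separated fields; otherwise the parser resumes
            # scanning for the next header. A blank line terminates a CSV block, and parsing stops
            # entirely once the System block header is reached.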
- # SBL Disks comes before System - - if ($in) { - - # if first line of section, detect if CSV - if ($firstline) { - - $firstline = $false - - #if not csv, go back to looking for blocks - if (($l -split ',').count -lt 4) { - $in = $false - } else { - - # bug workaround - # the Resources section has a duplicate _embeddedFailureAction - # rename the first to an ignore per DaUpton - # using the non-greedy match gives us the guarantee of picking out the first instance - - if ($section -eq 'Resources' -and $l -match '^(.*?)(_embeddedFailureAction)(.*)$') { - $l = $matches[1]+"ignore"+$matches[3] - } - - # number all ignore fields s.t. duplicates become unique (Networks section) - $n = 0 - while ($l -match '^(.*?)(,ignore,)(.*)$') { - $l = $matches[1]+",ignore$n,"+$matches[3] - $n += 1 - } - - # place in csv temporary file - $l | out-file -Encoding ascii -Width 9999 $csvf - } - - } else { - - # parsing - # in section, blank line terminates - if ($l -notmatch '^\s*$') { - $l | out-file -Append -Encoding ascii -Width 9999 $csvf - } else { - # at end; parse was good - # import the csv and insert into the datasource table - $datasource[$section] = import-csv $csvf - - # reset parser - $in = $false - $section = $null - } - } - - } elseif ($l -match '^\[===\s(.*)\s===\]') { - - # done at the start of the System block - if ($matches[1] -eq 'System') { break } - - # otherwise prepare to parse - $section = $matches[1] - $in = $true - $firstline = $true - } - - } while (-not $sr.EndOfStream) - } - - END { - $datasource - $sr.Close() - del $csvf - } -} - -# helper function which trims the full-length disk state -function Format-StorageBusCacheDiskState( - [string] $DiskState - ) -{ - $DiskState -replace 'CacheDiskState','' -} - -function Get-StorageBusCacheReport -{ - # aliases usage in this module is idiomatic, only using defaults - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - param( - [parameter(Position=0, Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] - $Path, - - [parameter(Mandatory=$true)] - [ReportLevelType] - $ReportLevel - ) - - <# - These are the possible DiskStates - - typedef enum - { - CacheDiskStateUnknown = 0, - CacheDiskStateConfiguring = 1, - CacheDiskStateInitialized = 2, - CacheDiskStateInitializedAndBound = 3, <- expected normal operational - CacheDiskStateDraining = 4, <- expected during RW->RO change (waiting for dirty pages -> 0) - CacheDiskStateDisabling = 5, - CacheDiskStateDisabled = 6, <- expected post-disable of S2D - CacheDiskStateMissing = 7, - CacheDiskStateOrphanedWaiting = 8, - CacheDiskStateOrphanedRecovering = 9, - CacheDiskStateFailedMediaError = 10, - CacheDiskStateFailedProvisioning = 11, - CacheDiskStateReset = 12, - CacheDiskStateRepairing = 13, - CacheDiskStateIneligibleDataPartition = 2000, - CacheDiskStateIneligibleNotGPT = 2001, - CacheDiskStateIneligibleNotEnoughSpace = 2002, - CacheDiskStateIneligibleUnsupportedSystem = 2003, - CacheDiskStateIneligibleExcludedFromS2D = 2004, - CacheDiskStateIneligibleForS2D = 2999, - CacheDiskStateSkippedBindingNoFlash = 3000, - CacheDiskStateIgnored = 3001, - CacheDiskStateNonHybrid = 3002, - CacheDiskStateInternalErrorConfiguring = 9000, - CacheDiskStateMarkedBad = 9001, - CacheDiskStateMarkedMissing = 9002, - CacheDiskStateInStorageMaintenance = 9003 <- expected during FRU/maint - } - CacheDiskState; - #> - - dir $Path\*cluster.log | sort -Property BaseName |% { - - $node = "" - if ($_.BaseName -match "^(.*)_cluster$") { - $node = $matches[1] - } - - Write-Output ("-"*40) 
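        # per-node cluster logs are collected as <node>_cluster.log, so the file basename
        # (matched above) identifies the node whose SBL Disks section is analyzed below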
"Node: $node" - - - ## - # Parse cluster log for the SBL Disk section - ## - - $data = Get-ClusterLogDataSource $_.FullName - - ## - # With a an SBL Disks section, provide commentary - ## - - $d = $data['SBL Disks'] - - if ($d) { - - ## - # Table of raw data, friendly cache device numbering - ## - - $idmap = @{} - $d |% { - $idmap[$_.DiskId] = $_.DeviceNumber - } - - if ($ReportLevel -eq [ReportLevelType]::Full) { - $d | sort IsSblCacheDevice,CacheDeviceId,DiskState | ft -AutoSize @{ Label = 'DiskState'; Expression = { Format-StorageBusCacheDiskState $_.DiskState }}, - DiskId,ProductId,Serial,@{ - Label = 'Device#'; Expression = {$_.DeviceNumber} - }, - @{ - Label = 'CacheDevice#'; Expression = { - if ($_.IsSblCacheDevice -eq 'true') { - '= cache' - } elseif ($idmap.ContainsKey($_.CacheDeviceId)) { - $idmap[$_.CacheDeviceId] - } elseif ($_.CacheDeviceId -eq '{00000000-0000-0000-0000-000000000000}') { - "= unbound" - } else { - # should be DiskStateMissing or OrphanedWaiting? Check live. - "= not present $($_.CacheDeviceId)" - } - } - },@{ - Label = 'SeekPenalty'; Expression = {$_.HasSeekPenalty} - }, - PathId,BindingAttributes,DirtyPages - } - - ## - # Now do basic testing of device counts - ## - - $dcache = $d |? IsSblCacheDevice -eq 'true' - $dcap = $d |? IsSblCacheDevice -ne 'true' - - Write-Output "Device counts: cache $($dcache.count) capacity $($dcap.count)" - - ## - # Test cache bindings if we do have cache present - ## - - if ($dcache) { - - # first uneven check, the basic count case - $uneven = $false - if ($dcap.count % $dcache.count) { - $uneven = $true - Write-Warning "Capacity device count does not evenly distribute to cache devices" - } - - # now look for unbound devices - $unbound = $dcap |? CacheDeviceId -eq '{00000000-0000-0000-0000-000000000000}' - if ($unbound) { - Write-Warning "There are $(@($unbound).count) unbound capacity device(s)" - } - - # unbound devices give us the second uneven case - if (-not $uneven -and ($dcap.count - @($unbound).count) % $dcache.count) { - $uneven = $true - } - - $gdev = $dcap |? 
DiskState -eq 'CacheDiskStateInitializedAndBound' | group -property CacheDeviceId - - if (@($gdev).count -ne $dcache.count) { - Write-Warning "Not all cache devices in use" - } - - $gdist = $gdev |% { $_.count } | group - - # in any given round robin binding of devices, there should be at most two counts; n and n-1 - - # single ratio - if (@($gdist).count -eq 1) { - Write-Output "Binding ratio is even: 1:$($gdist.name)" - } else { - # group names are n in the 1:n binding ratios - $delta = [math]::Abs([int]$gdist[0].name - [int]$gdist[1].name) - - if ($delta -eq 1 -and $uneven) { - Write-Output "Binding ratios are as expected for uneven device ratios" - } else { - Write-Warning "Binding ratios are uneven" - } - - # form list of group sizes - $s = $($gdist |% { - "1:$($_.name) ($($_.count) total)" - }) -join ", " - - Write-Output "Groups: $s" - } - } - - ## - # Provide summary of diskstate if more than one is present in the results - ## - - $g = $d | group -property DiskState - - if (@($g).count -ne 1) { - write-output "Disk State Summary:" - $g | sort -property Name | ft @{ Label = 'DiskState'; Expression = { Format-StorageBusCacheDiskState $_.Name}},@{ Label = "Number of Disks"; Expression = { $_.Count }} - } else { - write-output "All disks are in $(Format-StorageBusCacheDiskState $g.name)" - } - } - } -} - -function Get-StorageBusConnectivityReport -{ - # aliases usage in this module is idiomatic, only using defaults - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - param( - [parameter(Position=0, Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] - $Path, - - [parameter(Mandatory=$true)] - [ReportLevelType] - $ReportLevel - ) - - function Show-SSBConnectivity($node) - { - BEGIN { - $disks = 0 - $enc = 0 - $ssu = 0 - } - PROCESS { - switch ($_.DeviceType) { - 0 { $disks += 1 } - 1 { $enc += 1 } - 2 { $ssu += 1 } - } - } - END { - "$node has $disks disks, $enc enclosures, and $ssu scaleunit" - } - } - - dir $path\Node_*\ClusPort.xml | sort -Property FullName |% { - - $file = $_.FullName - $node = "" - if ($file -match "Node_([^\\]+)\\") { - $node = $matches[1] - } - - Import-ClixmlIf $_ | Show-SSBConnectivity $node - } -} - -function Get-StorageLatencyReport -{ - # aliases usage in this module is idiomatic, only using defaults - [Diagnostics.CodeAnalysis.SuppressMessageAttribute("PSAvoidUsingCmdletAliases", "")] - param( - [parameter(Position=0, Mandatory=$true)] - [ValidateNotNullOrEmpty()] - [string] - $Path, - - [parameter(Mandatory=$true)] - [ReportLevelType] - $ReportLevel, - - [int] - $CutoffMs = 0, - - [datetime] - $TimeBase, - - [int] - $HoursOfEvents = -1 - ) - - # comment on limits/base - if ($CutoffMs) { - Write-Output "Latency Cutoff: report limited to IO of $($CutoffMs)ms and higher (as limited by distribution buckets)" - } else { - Write-Output "Latency Cutoff: none, report will show the complete IO latency distribution" - } - if ($HoursOfEvents -eq -1) { - Write-Output "Time Cutoff : none, report will show the full available IO history" - } else { - Write-Output "Time Cutoff : report will show IO history from $($TimeBase.ToString()) for the prior $HoursOfEvents hours" - } - - # comment if neither limit is being used - if (-not $CutoffMs -and $HoursOfEvents -eq -1) { - write-output "NOTE: Show-SddcDiagnosticStorageLatencyReport provides access to time/latency cutoff limits which may significantly speed up reporting when focused on recent high latency events" - } - - $j = @() - - dir 
$Path\Node_*\Microsoft-Windows-Storage-Storport-Operational.EVTX | sort -Property FullName |% { - - $file = $_.FullName - $node = "" - if ($file -match "Node_([^\\]+)\\") { - $node = $matches[1] - } - - # parallelize processing of per-node event logs - - $j += Invoke-SddcCommonCommand -InitBlock $CommonFunc -JobName $node -SessionConfigurationName $null -ScriptBlock { - - $dofull = $false - - if ($using:ReportLevel -eq "Full") - { - $dofull = $true - } - - # helper function for getting list of bucketnames from x->end - function Get-Bucket - { - param( - [int] $i, - [int] $max, - [string[]] $s - ) - - $i .. $max |% { - $l = $_ - $s |% { "BucketIo$_$l" } - } - } - - # hash for devices, label schema, and whether values are absolute counts or split success/faul - $buckhash = @{} - $bucklabels = $null - $buckvalueschema = $null - - # note: cutoff bucket is 1-based, following the actual event schema labels (BucketIoCountNNN) - $cutoffbuck = 1 - - $evs = @() - - # get all storport 505 events; there is a label field at position 6 which names - # the integer fields in the following positions. these fields countain counts - # of IOs in the given latency buckets. we assume all events have the same labelling - # scheme. - # - # 1. count the number of sample periods in which a given bucket had any io. - # 2. emit onto the pipeline the hash of counted periods and events which have - # io in the last bucket - # - # note: getting fields by position is not ideal, but getting them by name would - # appear to require pushing through an XML rendering and hashing. this would be - # less efficient and this is already somewhat time consuming. - - # the erroraction handles (potentially) disabled logs, which have no events - - # get single event from the log (if present) - $e = Get-WinEvent -Path $using:file -FilterXPath (Get-FilterXpath -Event 505) -ErrorAction SilentlyContinue -MaxEvents 1 - - if ($e) { - - # use this event to determine schema and cutoff bucket (if specified) - - $xh = Get-EventDataHash $e - - # only need to get the bucket label schema once - # the number of labels and the number of bucket counts should be equal - # determine the count schema at the same time - $bucklabels = $xh['IoLatencyBuckets'] -split ',\s+' - - # is the count scheme split (RS5) or combined (RS1)? - # match 1 is the bucket type - # match 2 is the value bucket number (1 .. n) - if ($xh.ContainsKey("BucketIoSuccess1")) { - $schemasplit = $true - $buckvalueschema = "^BucketIo(Success|Failed)(\d+)$" - } else { - $schemasplit = $false - $buckvalueschema = "^BucketIo(Count)(\d+)$" - } - - # initialize empty data element test - $DataOr = @{} - - if ($using:CutoffMs) { - - $CutoffUs = $using:CutoffMs * 1000 - - # parse the buckets to determine where the cutoff is - $a = $xh['IoLatencyBuckets'] -split ',\s+' |% { - - switch -Regex ($_) { - - "^(\d+)us$" { [int] $matches[1] } - "^(\d+)ms$" { ([int] $matches[1]) * 1000 } - "^(\d+)\+ms$" { [int]::MaxValue } - - default { throw "misparsed storport 505 event latency bucket label $_ " } - } - } - - # determine which bucket contains the cutoff, and build the must-be-gtz kv - foreach ($i in 0..($a.Count - 1)) { - if ($CutoffUs -lt $a[$i]) { - # cutoff bucket matches the event schema, which is one-based, i.e. we're putting the cutoff - # at BucketIoCount3 if we found the cutoff in the 0-1-2nd array entry - $cutoffbuck = $i+1 - break - } - } - - # ... 
build the named buckets in the event which must-be-gtz - if ($schemasplit) { - $buck = Get-Bucket $cutoffbuck $a.Count 'Success','Failed' - } else { - $buck = Get-Bucket $cutoffbuck $a.Count 'Count' - } - - # ... build out the $DataOr hash as must-be-gtz tests - $DataOr = @{} - $buck |% { - $DataOr[$_] = "> 0" - } - } - - # now do two things based on determining the cutoff (or lack thereof) - # 1. relabel the cutoff bucket (the first we will return) to indicate the lower bound of latency - # - if there is no cutoff, we add 0- to indicate it contains 0-