AzStackHciNetwork/AzStackHci.Network.Helpers.psm1
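# Helper functions for Azure Stack HCI network validation: management/infra IP pool checks,
# infra IP outbound connectivity (DNS and required public endpoints), host network configuration
# readiness (RDMA, adapter symmetry and bandwidth), adapter driver and management adapter checks,
# and DHCP / new-node management IP validation.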
Import-LocalizedData -BindingVariable lnTxt -FileName AzStackHci.Network.Strings.psd1 function Test-MgmtIpRange { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Collections.ArrayList] $IpPools, [Parameter(Mandatory = $false, HelpMessage = "Specify Management Subnet")] [string] $ManagementSubnetValue, [int[]] $port = @(5986, 5985, 22), [int] $Timeout = 1000, [int] $Minimum = 5, [int] $Maximum = 255, [PSObject[]] $AtcHostIntents, [System.Boolean] $ProxyEnabled = $false ) try { $instanceResults = @() # Check no repeating ips in pool and all in management subnet $TestMgmtIpPools = TestMgmtIpPools -IpPools $IpPools -ManagementSubnetValue $ManagementSubnetValue $Status = if ($TestMgmtIpPools) { 'SUCCESS' } else { 'FAILURE' } $params = @{ Name = "AzStackHci_Network_Test_IP_Pools_Subnet_No_Duplicates" Title = 'Test IP Pools in Management Subnet and No duplicate IPs in IpPools' DisplayName = "Test IP Pools $ManagementSubnetValue" Severity = 'CRITICAL' Description = 'Checking start and end address are on the same subnet' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = "IpPool-$ManagementSubnetValue" TargetResourceName = "ManagementIPRange" TargetResourceType = 'Network Range' Timestamp = [datetime]::UtcNow Status = $Status AdditionalData = @{ Source = 'CustomerNetwork' Resource = 'CustomerSubnet' Detail = if ($TestMgmtIpPools) { $lnTxt.TestIpPoolPass -f $ManagementSubnetValue } else { $lnTxt.TestIpPoolFail -f $ManagementSubnetValue } Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params foreach ($ipPool in $IpPools) { $StartingAddress = $ipPool.StartingAddress $EndingAddress = $ipPool.EndingAddress # Check same subnet $TestMgmtSubnet = TestMgmtSubnet -StartingAddress $StartingAddress -EndingAddress $EndingAddress $Status = if ($TestMgmtSubnet) { 'SUCCESS' } else { 'FAILURE' } $params = @{ Name = 'AzStackHci_Network_Test_Management_IP_Range_Subnet' Title = 'Test Management IP Subnet' DisplayName = "Test Management IP Subnet $StartingAddress - $EndingAddress" Severity = 'CRITICAL' Description = 'Checking start and end address are on the same subnet' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = "$StartingAddress-$EndingAddress" TargetResourceName = "ManagementIPRange" TargetResourceType = 'Network Range' Timestamp = [datetime]::UtcNow Status = $Status AdditionalData = @{ Source = 'CustomerNetwork' Resource = 'CustomerSubnet' Detail = if ($TestMgmtSubnet) { $lnTxt.TestMgmtSubnetPass -f $StartingAddress, $EndingAddress } else { $lnTxt.TestMgmtSubnetFail -f $StartingAddress, $EndingAddress } Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params # Get IP in Range $MgmtIpRange = GetMgmtIpRange -StartingAddress $StartingAddress -EndingAddress $EndingAddress foreach ($Ip in $MgmtIpRange) { $result = @{} $result += @{ 'Ping' = Test-NetConnection -ComputerName $Ip -InformationLevel Quiet -WarningAction SilentlyContinue } foreach ($p in $port) { $result += @{ $p = IsTcpPortInUse -Ip $ip -Port $p -Timeout $Timeout } } $Status = if ($true -notin $result.Values) { 'SUCCESS' } else { 'FAILURE' } $msg = $lnTxt.ActiveHostCheck -f $ip, (($result.Keys | 
                ForEach-Object { "{0}:{1}" -f $psitem,$result[$psitem] }) -join ', ')
                $Type = if ($result.Values -contains $true) { 'WARNING' } else { 'INFORMATIONAL' }
                Log-Info $msg -Type $Type
                $params = @{
                    Name = 'AzStackHci_Network_Test_Management_IP_No_Active_Hosts'
                    Title = 'Test Management IP Range for Active Hosts'
                    DisplayName = "Test Management IP Range $Ip for Active Hosts"
                    Severity = 'CRITICAL'
                    Description = 'Checking no hosts respond on Management IP range'
                    Tags = @{}
                    Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements'
                    TargetResourceID = $Ip
                    TargetResourceName = "ManagementIPRange"
                    TargetResourceType = 'Network Range'
                    Timestamp = [datetime]::UtcNow
                    Status = $Status
                    AdditionalData = @{
                        Source = $Ip
                        Resource = 'ICMP/SSH/WINRM'
                        Detail = ($result.Keys | ForEach-Object { "{0}:{1}" -f $psitem,$result[$psitem] }) -join ', '
                        Status = $Status
                        TimeStamp = [datetime]::UtcNow
                    }
                    HealthCheckSource = $ENV:EnvChkrId
                }
                $instanceResults += New-AzStackHciResultObject @params
            }
        }

        # Check range size
        $TestMgmtRangeSize = TestMgmtRangeSize -IpPools $IpPools -Minimum $Minimum -Maximum $Maximum
        $status = if ($TestMgmtRangeSize) { 'SUCCESS' } else { 'FAILURE' }
        $allIps = GetMgmtIpRangeFromPools -IpPools $IpPools
        $ipCount = $allIps.Count
        $params = @{
            Name = 'AzStackHci_Network_Test_Management_IP_Range_Size'
            Title = 'Test Management IP Range Size'
            DisplayName = "Test Management IP Range Size of all the pools. $ipCount ips found."
            Severity = 'CRITICAL'
            Description = "Checking management IP range size is between $minimum-$maximum"
            Tags = @{}
            Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements'
            TargetResourceID = "Size:$ipCount "
            TargetResourceName = "ManagementIPRange"
            TargetResourceType = 'Network Range'
            Timestamp = [datetime]::UtcNow
            Status = $Status
            AdditionalData = @{
                Source = 'CustomerNetwork'
                Resource = 'CustomerRange'
                Detail = if ($TestMgmtRangeSize) { $lnTxt.TestMgmtRangeSizePass -f $Minimum, $Maximum } else { $lnTxt.TestMgmtRangeSizeFail -f $Minimum, $Maximum }
                Status = if ($TestMgmtRangeSize) { 'SUCCESS' } else { 'FAILURE' }
                TimeStamp = [datetime]::UtcNow
            }
            HealthCheckSource = $ENV:EnvChkrId
        }
        $instanceResults += New-AzStackHciResultObject @params

        #region check infra IP connection
        # A new vSwitch and management vNIC is created using Network ATC naming standards. The vSwitch is created using the intents configuration provided by the customer in the deployment json.
        # We will rotate the vNIC IP through the infra IPs to be tested.
        # Whatever the Mgmt intent is, we will create the vSwitch using the pNICs selected by the customer.
        # The curl tool provides a way to test from a specific source IP and allows checking both TCP ports and URLs.
        # Only the first 9 IPs from the infra range will be tested.
        # DNS registration must be avoided on the IPs tested from the vNIC.
        $infraIPRangeToValidate = GetMgmtIpRangeFromPools -IpPools $IpPools
        if ($ProxyEnabled) {
            Log-Info "Proxy is enabled on the host. Will check public endpoint connection via proxy."
        }
        else {
            Log-Info "Proxy is not enabled on the host. Will check public endpoint connection directly."
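            # Note: in both cases the curl.exe-based endpoint checks below honor the system
            # HTTP_PROXY/HTTPS_PROXY settings, so no explicit --proxy argument is passed.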
        }

        if ((Get-Command Get-VMSwitch -ErrorAction SilentlyContinue) -and (Get-WindowsFeature -Name Hyper-V -ErrorAction SilentlyContinue).Installed) {
            [PSObject[]] $mgmtIntent = $AtcHostIntents | Where-Object { $_.TrafficType.Contains("Management") }
            [System.String] $mgmtIntentName = $mgmtIntent[0].Name
            [System.String[]] $mgmtAdapters = $mgmtIntent[0].Adapter
            [System.Guid[]] $intentAdapterGuids = (Get-NetAdapter -Name $mgmtAdapters -ErrorAction SilentlyContinue).InterfaceGuid
            try {
                $needCleanUpVMSwitch = $false
                $mgmtVlanIdToRestore = 0
                #region prepare VMSwitch for testing infra IP connection
                [PSObject[]] $allExistingVMSwitches = Get-VMSwitch -SwitchType External
                $externalVMSwitchsCount = $allExistingVMSwitches.Count
                [PSObject] $foundVMSwitchToUse = $null
                if ($externalVMSwitchsCount -eq 0) {
                    # If we found no external VMSwitch, we will need to create one for this testing.
                    # Note that this operation will disconnect the host from the network for a moment (due to VMSwitch/vNIC creation).
                    # Since the code is executed locally, this disconnection should not affect the execution.
                    Log-Info "No VMSwitch exists in system. Will create VMSwitch for testing infra IP connection."
                    $tmpVMSwitchConfigInfo = ConfigureVMSwitchForTesting -MgmtAdapterNames $mgmtAdapters -MgmtIntentName $mgmtIntentName
                    $foundVMSwitchToUse = $tmpVMSwitchConfigInfo.VMSwitchInfo
                    $needCleanUpVMSwitch = $tmpVMSwitchConfigInfo.NeedCleanUp
                    $mgmtVlanIdToRestore = $tmpVMSwitchConfigInfo.MgmtVlanId
                    if (-not $tmpVMSwitchConfigInfo.IPReady) {
                        Log-Info "Cannot get a VMSwitch ready on $($env:COMPUTERNAME) with valid IP on the vNIC created. Fail the validation"
                        throw "Cannot get a VMSwitch ready on $($env:COMPUTERNAME) with valid IP on the vNIC created. Fail the validation"
                    }
                }
                else {
                    # If we found at least one external VMSwitch in the system, we then need to check
                    # whether one of them uses the same adapters as the mgmt intent.
                    Log-Info "Found $($externalVMSwitchsCount) VMSwitch(es) in the system. Need to check if a valid one could be used for validation."
                    foreach ($externalVMSwitch in $allExistingVMSwitches) {
                        # Need to check the switch is good for deployment: it must use the same adapters as the intent.
                        [System.Guid[]] $switchAdapterGuids = $externalVMSwitch.NetAdapterInterfaceGuid
                        if (Compare-Object -ReferenceObject $switchAdapterGuids -DifferenceObject $intentAdapterGuids) {
                            # Adapters used in the pre-defined VMSwitch and the intent are different. Ignore that VMSwitch.
                            Log-Info "Found $($externalVMSwitch.Name) with different adapters than the mgmt intent. Skip it."
                        }
                        else {
                            # The system already has a VMSwitch teaming the same mgmt adapters, so we will just use that VMSwitch.
                            $foundVMSwitchToUse = $externalVMSwitch
                            break
                        }
                    }
                    if (-not $foundVMSwitchToUse) {
                        Log-Info "No valid VMSwitch found! Check if we could create a new VMSwitch for validation."
                        # At this moment, we need further checking:
                        # - If none of the adapters in the mgmt intent is used by any existing VMSwitch, we will create a new VMSwitch.
                        # - If only some of the adapters in the mgmt intent are used by an existing VMSwitch, we will error out as this is not a supported scenario.
                        [System.Guid[]] $allSwitchAdapterGuids = $allExistingVMSwitches.NetAdapterInterfaceGuid
                        [System.Boolean] $intentAdapterAlreadyUsed = $false
                        foreach ($tmpAdapterGuid in $intentAdapterGuids) {
                            if ($allSwitchAdapterGuids.Contains($tmpAdapterGuid)) {
                                $intentAdapterAlreadyUsed = $true
                                break
                            }
                        }
                        if (-not $intentAdapterAlreadyUsed) {
                            # None of the adapters in the mgmt intent is used by an existing VMSwitch, so we will create a new VMSwitch.
                            Log-Info "VMSwitch found, but no VMSwitch uses the mgmt intent adapters. Will create VMSwitch for testing infra IP connection."
                            $tmpVMSwitchConfigInfo = ConfigureVMSwitchForTesting -MgmtAdapterNames $mgmtAdapters -MgmtIntentName $mgmtIntentName
                            $foundVMSwitchToUse = $tmpVMSwitchConfigInfo.VMSwitchInfo
                            $needCleanUpVMSwitch = $tmpVMSwitchConfigInfo.NeedCleanUp
                            $mgmtVlanIdToRestore = $tmpVMSwitchConfigInfo.MgmtVlanId
                            if (-not $tmpVMSwitchConfigInfo.IPReady) {
                                Log-Info "Cannot get a VMSwitch ready on $($env:COMPUTERNAME) with valid IP on the vNIC created. Fail the validation"
                                throw "Cannot get a VMSwitch ready on $($env:COMPUTERNAME) with valid IP on the vNIC created. Fail the validation"
                            }
                        }
                        else {
                            # This is an error situation: some of the mgmt intent adapters are already used by an existing VMSwitch,
                            # while other mgmt intent adapters are still "free" in the system. We don't know what to do with this, so we need to error out.
                            Log-Info "VMSwitch found, but the mgmt adapter list does not match any VMSwitch adapter list. Wrong configuration. Will fail the validation"
                        }
                    }
                }
                #endregion

                $allPublicEndpointServicesToCheck = Get-AzstackHciConnectivityTarget | Where-Object { $_.Name -Like "Azure_Kubernetes_Service_*" -or $_.Name -Like "AzStackHci_MOCStack_*" -or $_.Name -Like "Vm_Management_HCI_*" }

                if ($foundVMSwitchToUse) {
                    Log-Info "Got VMSwitch $($foundVMSwitchToUse.Name) to use for validating infra IP connection. Start the validation..."
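                    # Summary of the per-IP validation below: a temporary vNIC is added to this VMSwitch, each
                    # candidate infra IP is assigned to it in turn, and connectivity is tested from that source IP -
                    # DNS over TCP port 53 via System.Net.Sockets.TcpClient, and the required public endpoints via
                    # curl.exe. Illustrative manual equivalent (placeholder endpoint and source IP, not taken from
                    # the configuration):
                    #   curl.exe -sS --connect-timeout 5 -m 6 "https://example.contoso.com" --interface 192.0.2.10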
$mgmtAlias = "vManagement($($MgmtIntentName))" $prefixLength = (Get-NetIPAddress -InterfaceAlias $mgmtAlias -AddressFamily IPv4).PrefixLength[0] $mgmtIPConfig = Get-NetIPConfiguration -InterfaceAlias $mgmtAlias $defaultGateway = $mgmtIPconfig.IPv4DefaultGateway[0].NextHop # Try to get DNS server IP from running system [PSObject[]] $getDNSServers = Get-DnsClientServerAddress -InterfaceAlias $mgmtAlias -AddressFamily IPv4 -ErrorAction SilentlyContinue if ($getDNSServers -and ($getDNSServers.Count -gt 0) -and ($getDNSServers[0].ServerAddresses.Count -gt 0)) { $dnsServerIPToCheck = $getDNSServers[0].ServerAddresses[0] try { #region configure vNIC for testing infra IP connection $tmpGuid = [System.Guid]::NewGuid() $newVNICName = "TestvNIC$($tmpGuid)" Log-Info "Prepare vNIC $($newVNICName) on the VMSwitch to use for infra IP connection validation" if (Get-VMNetworkAdapter -All | where-Object { $_.Name -eq $newVNICName }) { Remove-VMNetworkAdapter -ManagementOS -SwitchName $foundVMSwitchToUse.Name -Name $newVNICName -Confirm:$false } Add-VMNetworkAdapter -ManagementOS -SwitchName $foundVMSwitchToUse.Name -Name $newVNICName Get-NetAdapter -name "vEthernet ($($newVNICName))" -ErrorAction SilentlyContinue | Rename-NetAdapter -NewName $newVNICName Set-DnsClient -InterfaceAlias $newVNICName -RegisterThisConnectionsAddress $false $mgmtVlanIdToSet = (Get-VMNetworkAdapterIsolation -ManagementOS -VMNetworkAdapterName $mgmtAlias).DefaultIsolationID if ($mgmtVlanIdToSet -ne 0) { Set-VMNetworkAdapterIsolation -ManagementOS ` -VMNetworkAdapterName $newVNICName ` -IsolationMode Vlan ` -AllowUntaggedTraffic $true ` -DefaultIsolationID $mgmtVlanIdToSet } Set-NetIPInterface -InterfaceAlias $newVNICName -Dhcp Disabled # Need to wait until the DHCP is disabled on the adapter. Otherwise, following call might fail [System.Boolean] $vNicReady = $false $stopWatch = [System.diagnostics.stopwatch]::StartNew() while (-not $vNicReady -and ($stopWatch.Elapsed.TotalSeconds -lt 30)) { if ((Get-VMNetworkAdapter -ManagementOS -Name $newVNICName -ErrorAction SilentlyContinue) -and ((Get-NetIPInterface -InterfaceAlias $newVNICName -AddressFamily IPv4).Dhcp -eq "Disabled")) { $vNicReady = $true break } else { Start-Sleep -Seconds 3 } } #endregion if ($vNICReady) { Log-Info "VMNetworkAdapter [ $($newVNICName) ] ready and DHCP is disabled on the adapter." $retryTimes = 10 Log-Info "Will check connection from Infra IP to DNS server [ $($dnsServerIPToCheck):53 ] and public endpoints for max of $($retryTimes) times" ################################### # Start testing infra IP connection ################################### # Magic number: we will test only first 9 IPs from the infra range as: # 6 are the one we requested right now for services running in HCI cluster # 3 are the additional that might be used in the future (for example, SLB VM, etc.) 
# We don't want to test all the infra IP as it will requires a lot of time to finish the validation $ipNumberToCheck = 9 if ($infraIPRangeToValidate.Count -lt $ipNumberToCheck) { $ipNumberToCheck = $infraIPRangeToValidate.Count } for ($i=0; $i -lt $ipNumberToCheck; $i++) { $endpointIndex = 1 $ipToCheck = $infraIPRangeToValidate[$i] Log-Info "`nCheck IP $($i+1) / $($ipNumberToCheck): [ $($ipToCheck) ]" #region Set new IP on the adapter # Make sure no IP on the adapter $oldIpAddresses = Get-NetIPAddress -InterfaceAlias $newVNICName foreach ($ip in $oldIpAddresses) { Remove-NetIPAddress -InterfaceAlias $newVNICName -IPAddress $ip.IPAddress -Confirm:$false -ErrorAction SilentlyContinue } if (Get-NetRoute -InterfaceAlias $newVNICName -DestinationPrefix 0.0.0.0/0 -ErrorAction SilentlyContinue) { New-NetIPAddress -InterfaceAlias $newVNICName -IPAddress $ipToCheck -PrefixLength $prefixLength -SkipAsSource $true | Out-Null } else { New-NetIPAddress -InterfaceAlias $newVNICName -IPAddress $ipToCheck -PrefixLength $prefixLength -DefaultGateway $defaultGateway -SkipAsSource $true | Out-Null } # Wait for the new IP to be ready [System.Boolean] $currentIPReady = $false $ipStopWatch = [System.diagnostics.stopwatch]::StartNew() while (-not $currentIPReady -and ($ipStopWatch.Elapsed.TotalSeconds -lt 10)) { $ipConfig = Get-NetIPAddress -InterfaceAlias $newVNICName | Where-Object { $_.IPAddress -eq $ipToCheck -and $_.PrefixOrigin -eq "Manual" -and $_.AddressFamily -eq "IPv4" -and $_.AddressState -eq "Preferred" } if ($ipConfig) { $currentIPReady = $true break } else { Start-Sleep -Seconds 3 } } #endregion #region Check connection from infra IP to DNS server Log-Info " >> Trying DNS connection to $($dnsServerIPToCheck) port 53." [System.Boolean] $isDnsConnected = $false $retry = 1 while ((-not $isDnsConnected) -and ($retry -le $retryTimes)) { try { $src = [System.Net.IPEndPoint]::new([ipaddress]::Parse($ipToCheck),0) $tc = [System.Net.Sockets.TcpClient]::new($src) $tc.Connect($dnsServerIPToCheck, 53) if ($tc.Connected) { Log-Info " == DNS connection ESTABLISHED on attempt $($retry)" $isDnsConnected = $true break } else { Log-Info " ?? FAILED DNS connection on attempt $($retry)" } } catch { Log-Info " ?? FAILED! Got exception while checking DNS connection on attempt ($($retry))!" } finally { if ($tc) { $tc.Dispose() } } Start-Sleep -Seconds 3 $retry++ } #endregion $dnsConnectionRstParams = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_DNS_Server_Port_53' Title = 'Test DNS server port connection for all IP in infra IP pool' DisplayName = "Test DNS server port connection for all IP in infra IP pool" Severity = 'CRITICAL' Description = 'Test DNS server port connection for all IP in infra IP pool' Tags = @{} Remediation = "Make sure infra IP $ipToCheck could connect to your DNS server correctly." TargetResourceID = "Infra_IP_Connection_$($ipToCheck)" TargetResourceName = "Infra_IP_Connection_$($ipToCheck)" TargetResourceType = "Infra_IP_Connection_$($ipToCheck)" Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = $($ipToCheck) Detail = "[FAILED] Connection from $ipToCheck to $($dnsServerIPToCheck):53 failed after 3 attempts." 
Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } if ($ProxyEnabled) { # In case proxy is enabled, we will downgrade the severity to WARNING as the DNS resolution might happen on proxy server $dnsConnectionRstParams.Severity = 'WARNING' } if ($isDnsConnected) { $dnsConnectionRstParams.Status = "SUCCESS" $dnsConnectionRstParams.AdditionalData.Detail = "[PASSED] Connection from $ipToCheck to $($dnsServerIPToCheck):53 passed." $dnsConnectionRstParams.AdditionalData.Status = "SUCCESS" #region Check connection from infra IP to well known endpoints # Since we rely on DNS naming resolution, we put the checking here in this if statement foreach ($service in $allPublicEndpointServicesToCheck) { foreach ($endpointInService in $service.EndPoint) { # Will try $retryTimes curl connection to the remote endpoint # Note that curl.exe honor the system HTTP_PROXY/HTTPS_PROXY settings, so we don't need to specify "--proxy" parameter here $endpointToCheck = "$($service.Protocol[0])://$($endpointInService)" $curlGetExpression = "curl.exe -sS --connect-timeout 5 -m 6 `"$($endpointToCheck)`" --interface $($ipToCheck)" $curlHeaderExpression = "curl.exe -i -sS --connect-timeout 5 -m 6 `"$($endpointToCheck)`" --interface $($ipToCheck)" Log-Info " >> Trying public Endpoint connection $($endpointIndex) to $($endpointToCheck)." [System.Boolean] $isPublicEndpointConnected = $false $retry = 1 while ((-not $isPublicEndpointConnected) -and ($retry -le $retryTimes)) { try { $curlGetContent = Invoke-Expression $curlGetExpression if (-not [System.String]::IsNullOrWhiteSpace($curlGetContent)) { Log-Info " == Connection ESTABLISHED with GET on attempt $($retry)" $isPublicEndpointConnected = $true break } else { $curlHeaderContent = Invoke-Expression $curlHeaderExpression # Need to analyze the output of $curlHeaderContent to see if the connection is established # If proxy enabled, the response will need to contain something in addition to the "HTTP/1.1 200 Connection established" if ($ProxyEnabled) { $curlHeaderContent = $curlHeaderContent -replace "^HTTP\/\d\.\d 200 Connection established", "" } if (-not [System.String]::IsNullOrWhiteSpace($curlHeaderContent)) { Log-Info " == Connection ESTABLISHED with HEADER only on attempt $($retry)" $isPublicEndpointConnected = $true break } else { Log-Info " ?? FAILED connection on attempt $($retry)" } } } catch { Log-Info " ?? FAILED! Got exception while checking public endpoint connection on attempt ($($retry))!" } Start-Sleep -Seconds 3 $retry++ } if ([System.String]::IsNullOrEmpty($service.Severity) -or [System.String]::IsNullOrWhiteSpace($service.Severity)) { $currentSeverity = "CRITICAL" } else { $currentSeverity = $service.Severity } $publicEndpointRstParams = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_' + $service.Name Title = 'Test outbound connection for IP in infra IP pool' DisplayName = "Test outbound connection for IP in infra IP pool" + $service.Title Severity = $currentSeverity Description = 'Test connection for IP in infra IP pool ' + $service.Description Tags = @{} Remediation = "Make sure infra IP $ipToCheck could connect to public endpoint $endpointToCheck correctly. 
`nhttps://learn.microsoft.com/en-us/azure/azure-arc/servers/network-requirements?tabs=azure-cloud#urls" TargetResourceID = $service.TargetResourceID TargetResourceName = $service.TargetResourceName TargetResourceType = $service.TargetResourceType Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = $($ipToCheck) Detail = "[FAILED] Connection from $ipToCheck to $endpointToCheck failed after $retryTimes attempts" Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } if ($isPublicEndpointConnected) { $publicEndpointRstParams.Status = "SUCCESS" $publicEndpointRstParams.AdditionalData.Detail = "[PASSED] Connection from $ipToCheck to $endpointToCheck passed." $publicEndpointRstParams.AdditionalData.Status = "SUCCESS" } else { Log-info "Public Endpoint connection failed for infra IP $ipToCheck." } $instanceResults += New-AzStackHciResultObject @publicEndpointRstParams $endpointIndex++ } } #endregion } else { Log-info "DNS connection failed for infra IP $ipToCheck." } $instanceResults += New-AzStackHciResultObject @dnsConnectionRstParams } } else { # vNIC creation failure. Normally won't hit this path, but keep it here for safety Log-Info "Cannot get a vNIC ready on VMSwitch $($foundVMSwitchToUse.Name) in $($env:COMPUTERNAME) for validating infra IP connection. Fail the validation" $params = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_vNIC_Readiness' Title = 'Test virtual adapter readiness for all IP in infra IP pool' DisplayName = "Test virtual adapter readiness for all IP in infra IP pool" Severity = 'CRITICAL' Description = 'Test virtual adapter readiness for all IP in infra IP pool' Tags = @{} Remediation = "Make sure Add/Get-VMNetworkAdapter on $($env:COMPUTERNAME) can run correctly." TargetResourceID = "Infra_IP_Connection_VNICReadiness" TargetResourceName = "Infra_IP_Connection_VNICReadiness" TargetResourceType = "Infra_IP_Connection_VNICReadiness" Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = 'VNICReadiness' Detail = "[FAILED] Cannot test connection for infra IP. VM network adapter is not configured correctly on host $($env:COMPUTERNAME)." Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } } finally { # Best effort to clean the IP used, as the last IP checked might not be cleaned in the previous checking for ($i=0; $i -lt $ipNumberToCheck; $i++) { Remove-NetIPAddress -IPAddress $infraIPRangeToValidate[$i] -ErrorAction SilentlyContinue -Confirm:$false } # Clean up the vNIC if (Get-VMNetworkAdapter -ManagementOS -Name $newVNICName -ErrorAction SilentlyContinue) { Remove-VMNetworkAdapter -ManagementOS -SwitchName $foundVMSwitchToUse.Name -Name $newVNICName -Confirm:$false } } } else { # No DNS client server address found on the adapter Log-Info "Cannot get DNS client server address correctly on $($env:COMPUTERNAME) for validating infra IP connection. 
Fail the validation" $params = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_DNSClientServerAddress_Readiness' Title = 'Test DNS client server addresses readiness for all IP in infra IP pool' DisplayName = "Test DNS client server addresses readiness for all IP in infra IP pool" Severity = 'CRITICAL' Description = 'Test DNS client server addresses readiness for all IP in infra IP pool' Tags = @{} Remediation = "Set DNS client server address correctly on management adapter [ $($mgmtAlias) ] on $($env:COMPUTERNAME). Check it using Get-DnsClientServerAddress" TargetResourceID = "Infra_IP_Connection_DNSClientReadiness" TargetResourceName = "Infra_IP_Connection_DNSClientReadiness" TargetResourceType = "Infra_IP_Connection_DNSClientReadiness" Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = 'DNSClientReadiness' Detail = "[FAILED] Cannot find correctly DNS client server address on host $($env:COMPUTERNAME)." Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } } else { Log-Info "Cannot get a VMSwitch to use on $($env:COMPUTERNAME) for validating infra IP connection. Fail the validation" $params = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_VMSwitch_Readiness' Title = 'Test VMSwitch readiness for all IP in infra IP pool' DisplayName = "Test VMSwitch readiness for all IP in infra IP pool" Severity = 'CRITICAL' Description = 'Test VMSwitch readiness for all IP in infra IP pool' Tags = @{} Remediation = "Make sure at least one VMSwitch preconfigured on the host $($env:COMPUTERNAME) has the same set of adapters defined in management intent." TargetResourceID = "Infra_IP_Connection_VMSwitchReadiness" TargetResourceName = "Infra_IP_Connection_VMSwitchReadiness" TargetResourceType = 'Infra_IP_Connection_VMSwitchReadiness' Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = 'VMSwitchReadiness' Detail = "[FAILED] Cannot test connection for infra IP with wrong VMSwitch configured on host $($env:COMPUTERNAME)." Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } } finally { if ($needCleanUpVMSwitch) { # Clean up the VMSwitch created for testing Log-Info "Clean up VMSwitch $($foundVMSwitchToUse.Name) created during the testing" Remove-VMSwitch -Name $foundVMSwitchToUse.Name -Force -ErrorAction SilentlyContinue if ($mgmtVlanIdToRestore -ne 0) { foreach ($tmpAdapter in $mgmtAdapters) { Log-Info "Restore VlanId for adapter $tmpAdapter to $mgmtVlanIdToRestore" Set-NetAdapterAdvancedProperty -Name $tmpAdapter -RegistryKeyword "VlanID" -RegistryValue $mgmtVlanIdToRestore } } # In case of DHCP scenario, after VMSwitch removed, the pNIC might not get the IP address immediately # Wait for some time (15 seconds) to make sure the new IP is settled correctly. 
[System.Boolean] $currentIPReady = $false $ipStopWatch = [System.diagnostics.stopwatch]::StartNew() while (-not $currentIPReady -and ($ipStopWatch.Elapsed.TotalSeconds -lt 15)) { # If the pNIC has Manual or Dhcp IPv4 address with "Preferred" state, we consider it as "ready" $ipConfig = Get-NetIPAddress -InterfaceAlias $mgmtAdapters[0] | Where-Object { ($_.PrefixOrigin -eq "Manual" -or $_.PrefixOrigin -eq "Dhcp") -and $_.AddressFamily -eq "IPv4" -and $_.AddressState -eq "Preferred" } if ($ipConfig) { $currentIPReady = $true break } else { Start-Sleep -Seconds 3 } } if (-not $currentIPReady) { # should not get into here, but keep it here for safety Log-Info "Cannot get the IP address back to the pNIC after VMSwitch removed. Please check the system manually." throw "Cannot get the IP address back to the pNIC after VMSwitch removed. Please check the system manually." } else { Log-Info "IP address back to the pNIC after VMSwitch removed. System is ready for next validation." } } else { Log-Info "VMSwitch $($foundVMSwitchToUse.Name) pre-exist in the system. No need to clean up." } # will need to restart the device manager service $edgeDeviceTxtFile = "C:\ProgramData\GuestConfig\extension_Logs\Microsoft.Edge.DeviceManagementExtension\EdgeDevice.txt" if (Test-Path $edgeDeviceTxtFile -PathType Leaf) { Log-Info "Device management extension info file found, need to refresh it after vNIC removed." Log-Info "Clean up the Device Extension info file and restart DeviceManagementService." Remove-Item -Path $edgeDeviceTxtFile -Force Restart-Service -Name DeviceManagementService -Force # Wait for the Device Extension info file to be refreshed Log-Info "Wait for 60 seconds to wait for Device Extension info file to be refreshed after vNIC removed." [System.Boolean] $deviceExtFileRefreshReady = $false $devExtStopWatch = [System.diagnostics.stopwatch]::StartNew() while ($devExtStopWatch.Elapsed.TotalSeconds -lt 60) { if (Test-Path $edgeDeviceTxtFile -PathType Leaf) { Log-Info "Found Device Extension info file recreated" $deviceExtFileRefreshReady = $true break } else { Log-Info "Waiting..." Start-Sleep -Seconds 3 } } if ($deviceExtFileRefreshReady) { Log-Info "Device management extension info file refreshed after vNIC removed. Ready for next test." } else { Log-Info "Not able to get Device Extension info file ready after 60 seconds. Fail validation" throw "Not able to get Device Extension info file ready after 60 seconds. Fail validation" } } } } else { Log-Info "Hyper-V is not working correctly on $($env:COMPUTERNAME). Fail testing infra IP connection." $params = @{ Name = 'AzStackHci_Network_Test_Infra_IP_Connection_Hyper_V_Readiness' Title = 'Test Hyper-V readiness for all IP in infra IP pool' DisplayName = "Test Hyper-V readiness for all IP in infra IP pool" Severity = 'CRITICAL' Description = 'Test Hyper-V readiness for all IP in infra IP pool' Tags = @{} Remediation = "Make sure that Hyper-V is installed on host $($env:COMPUTERNAME) and rerun the validation." TargetResourceID = "Infra_IP_Connection_HYPERVReadiness" TargetResourceName = "Infra_IP_Connection_HYPERVReadiness" TargetResourceType = 'Infra_IP_Connection_HYPERVReadiness' Timestamp = [datetime]::UtcNow Status = "FAILURE" AdditionalData = @{ Source = $env:COMPUTERNAME Resource = 'HYPERVReadiness' Detail = "[FAILED] Cannot test connection for infra IP without Hyper-V on host $($env:COMPUTERNAME)." 
Status = "FAILURE" TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } #endregion return $instanceResults } catch { throw $_ } } function Test-HostNetworkConfigurationReadiness { [CmdletBinding()] param ( [System.Management.Automation.Runspaces.PSSession[]] $PSSession, [PSObject[]] $AtcHostIntents ) try { if (($PSSession.Count -eq 0) -or ($AtcHostIntents.Count -eq 0)) { Log-Info "No PSSession or AtcHostIntents provided. Skip run of Test-HostNetworkConfigurationReadiness" return } else { Log-Info "Will check host network adapter RDMA status, adapter symmetry and bandwidth, and other host network" Log-Info "configuration (include DNS client configuraion, Hyper-V is running correctly, VMSwitch (if exists)" Log-Info "has mgmt intent adapters, VlanId for adapters, physical adapter used in JSON." } # Check host network readiness status $hostNetworkReadinessTestResults = @() foreach ($session in $PSSession) { #region Check RDMA status Log-Info "Checking NetAdapter RDMA status on $($session.ComputerName)" $rdmaResult = Invoke-Command -Session $session -ScriptBlock ${function:CheckNetAdapterRDMAStatus} -ArgumentList @(, $AtcHostIntents) if ($null -ne $rdmaResult) { Log-Info "Got RDMA validation results from $($session.ComputerName)" $currentMachineRdmaStatus = if ($rdmaResult.Pass) { 'SUCCESS' } else { 'FAILURE' } $currentMachineRdmaTestDetailMessage = $rdmaResult.Message } else { Log-Info "NO RDMA validation results found from $($session.ComputerName)" $currentMachineRdmaStatus = 'FAILURE' $currentMachineRdmaTestDetailMessage = "NO RDMA validation results returned by function CheckNetAdapterRDMAStatus from server $($session.ComputerName)" } $rdmaRstObject = @{ Name = 'AzStackHci_Network_Test_NetAdapter_RDMA_Operational' Title = 'Test NetAdapter RDMA requirement' DisplayName = "Test if RDMA requirement meets for the deployment on all servers" Severity = 'CRITICAL' Description = 'Checking RDMA Operational Status on {0}' -f $session.ComputerName Tags = @{} Remediation = 'Make sure adapter RDMA is operational. Use Get-NetAdapterRdma cmdlet to check the status of RDMA for the network adapter in the system.' 
                TargetResourceID = $session.ComputerName
                TargetResourceName = "NetAdapter"
                TargetResourceType = 'Network Adapter RDMA'
                Timestamp = [datetime]::UtcNow
                Status = $currentMachineRdmaStatus
                AdditionalData = @{
                    Source = $session.ComputerName
                    Resource = 'Network Adapter RDMA Operational Status'
                    Detail = $currentMachineRdmaTestDetailMessage
                    Status = $currentMachineRdmaStatus
                    TimeStamp = [datetime]::UtcNow
                }
                HealthCheckSource = $ENV:EnvChkrId
            }
            $hostNetworkReadinessTestResults += New-AzStackHciResultObject @rdmaRstObject
            #endregion

            #region Check adapter symmetry and bandwidth requirement
            Log-Info "Checking NetAdapter symmetry and bandwidth requirement on $($session.ComputerName)"
            $adapterSymmetryAndBandwidthResult = Invoke-Command -Session $session -ScriptBlock ${function:CheckAdapterSymmetryAndBandwidth} -ArgumentList @(, $AtcHostIntents)
            if ($null -ne $adapterSymmetryAndBandwidthResult) {
                Log-Info "Got adapter symmetry and bandwidth validation results from $($session.ComputerName)"
                $currentMachineAdapterSymmetryBandwidthStatus = if ($adapterSymmetryAndBandwidthResult.Pass) { 'SUCCESS' } else { 'FAILURE' }
                $currentMachineAdapterSymmetryBandwidthTestDetailMessage = $adapterSymmetryAndBandwidthResult.Message
            }
            else {
                Log-Info "NO adapter symmetry and bandwidth validation results found from $($session.ComputerName)"
                $currentMachineAdapterSymmetryBandwidthStatus = 'FAILURE'
                $currentMachineAdapterSymmetryBandwidthTestDetailMessage = "NO adapter symmetry and bandwidth validation results returned by function CheckAdapterSymmetryAndBandwidth from server $($session.ComputerName)"
            }
            $adapterSymmetryRstObject = @{
                Name = 'AzStackHci_Network_Test_NetAdapter_Symmetry_Bandwidth'
                Title = 'Test NetAdapter symmetry and bandwidth requirement'
                DisplayName = "Test if the network adapters used in one intent are symmetric and the bandwidth meets the minimum requirement"
                Severity = 'CRITICAL'
                Description = 'Checking network adapters and bandwidth status on {0}' -f $session.ComputerName
                Tags = @{}
                Remediation = 'Make sure adapters used in an intent are symmetric and the minimum bandwidth for RDMA is 10 Gbps. Use the Get-NetAdapter cmdlet on the system to check the adapter information.'
TargetResourceID = $session.ComputerName TargetResourceName = "NetAdapter" TargetResourceType = 'Network Adapter Symmetry and Bandwidth' Timestamp = [datetime]::UtcNow Status = $currentMachineAdapterSymmetryBandwidthStatus AdditionalData = @{ Source = $session.ComputerName Resource = 'Network Adapter Symmetry and Bandwidth' Detail = $currentMachineAdapterSymmetryBandwidthTestDetailMessage Status = $currentMachineAdapterSymmetryBandwidthStatus TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $hostNetworkReadinessTestResults += New-AzStackHciResultObject @adapterSymmetryRstObject #endregion #region Host network configuration readiness Log-Info "Checking host network readiness configuration on $($session.ComputerName)" $networkReadinessResult = Invoke-Command -Session $session -ScriptBlock ${function:CheckHostNetworkConfigurationReadiness} -ArgumentList @(, $AtcHostIntents) if ($null -ne $networkReadinessResult) { Log-Info "Network readiness check results from $($session.ComputerName)" $currentMachineNetworkReadinessStatus = if ($networkReadinessResult.Pass) { 'SUCCESS' } else { 'FAILURE' } $currentMachineNetworkReadinessTestDetailMessage = $networkReadinessResult.Message } else { Log-Info "NO host network configuration readiness validation results found from $($session.ComputerName)" $currentMachineNetworkReadinessStatus = 'FAILURE' $currentMachineNetworkReadinessTestDetailMessage = "NO host network configuration readiness validation results returned by function CheckHostNetworkConfigurationReadiness from $($session.ComputerName)" } $networkReadinessRstObject = @{ Name = 'AzStackHci_Network_Test_HostNetworkConfigurationReadiness' Title = 'Test host network configuration readiness' DisplayName = "Test if host network requirement meets for the deployment on all servers" Severity = 'CRITICAL' Description = 'Checking host network configuration readiness status on {0}' -f $session.ComputerName Tags = @{} Remediation = 'Make sure host network configuration readiness is correct. Review detail message to find out the issue.' 
TargetResourceID = $session.ComputerName TargetResourceName = "HostNetworkReadiness" TargetResourceType = 'HostNetworkReadiness' Timestamp = [datetime]::UtcNow Status = $currentMachineNetworkReadinessStatus AdditionalData = @{ Source = $session.ComputerName Resource = 'HostNetworkReadiness configuration status' Detail = $currentMachineNetworkReadinessTestDetailMessage Status = $currentMachineNetworkReadinessStatus TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $hostNetworkReadinessTestResults += New-AzStackHciResultObject @networkReadinessRstObject #endregion } return $hostNetworkReadinessTestResults } catch { throw $_ } } function Test-AdapterDriverMgmtAdapterReadiness { [CmdletBinding()] param ( [System.Management.Automation.Runspaces.PSSession[]] $PSSession, [PSCredential] $SessionCredential, [PSObject[]] $AtcHostIntents ) try { $inboxDriverMgmtIPTestResults = @() $checkInboxDriverScript = { [CmdletBinding()] param ( [String[]] $AdapterNames ) $retVal = New-Object PSObject -Property @{ Pass = $true Message = "Adapter inbox driver check on $($ENV:COMPUTERNAME)" } [PSObject[]] $allAdaptersInfo = Get-NetAdapter -Name $AdapterNames -ErrorAction SilentlyContinue if (($allAdaptersInfo.Count -eq 0) -or ($allAdaptersInfo.Count -ne $AdapterNames.Count)) { $retVal.Pass = $false $retVal.Message += "`nFailed: Adapter(s) not found on the system" $retVal.Message += "`nExpected adapter(s): $($AdapterNames | Out-String)" $retVal.Message += "`nFound adapter(s): $($allAdaptersInfo | Out-String)" } else { $adaptersUsingInboxDriver = $allAdaptersInfo | Where-Object { $_.DriverProvider -match "Microsoft" -or $_.DriverProvider -match "Windows" } $hardwareType = (Get-WmiObject -Class Win32_ComputerSystem).Model # Check adatper is not inbox driver for physical environment only if (-not $adaptersUsingInboxDriver -or ($hardwareType -eq "Virtual Machine")) { $retVal.Message += "`nPassed: No adapter using inbox driver found, or current system is a virtual environment." $retVal.Message += "`nAdapter(s) checked: $($AdapterNames)" } else { $retVal.Pass = $false $retVal.Message += "`nFailed: Adapter(s) using inbox driver found on non-virtual environment`n" $retVal.Message += ($adaptersUsingInboxDriver.Name | Out-String) } } return $retVal } $checkMgmtAdapterScript = { [CmdletBinding()] param ( [String[]] $MgmtAdapterNames, [String] $MgmtIntentName ) $retVal = New-Object PSObject -Property @{ Pass = $true Message = "Management adapter IP and DNS check on $($ENV:COMPUTERNAME)" } $mgmtVNicName = "vManagement($($MgmtIntentName))" [PSObject[]] $allExistingVMSwitches = Get-VMSwitch -SwitchType External if ($allExistingVMSwitches.Count -gt 0) { # VMSwitch should contains 0 or all of the mgmt adapters # if VMSwitch contains all mgmt adapters, then there should be 1 vNIC named as "vManagement(mgmtintentname)"" foreach ($externalVMSwitch in $allExistingVMSwitches) { # Need to check the switch is good for deployment: using same adapter as the intent [System.Guid[]] $switchAdapterGuids = $externalVMSwitch.NetAdapterInterfaceGuid [System.Guid[]] $intentAdapterGuids = (Get-NetAdapter -Name $MgmtAdapterNames -Physical -ErrorAction SilentlyContinue).InterfaceGuid if (Compare-Object -ReferenceObject $switchAdapterGuids -DifferenceObject $intentAdapterGuids) { # Adapters used in pre-defined VMSwitch and the intent are different. 
Need to make sure 0 mgmt adapter used by VMSwitch foreach ($mgmtAdapter in $intentAdapterGuids) { if ($switchAdapterGuids -contains $mgmtAdapter) { $retVal.Pass = $false $retVal.Message += "`nFailed: Adapter with GUID $($mgmtAdapter) defined in management intent is used by VMSwitch $($externalVMSwitch.Name). Please make sure VMSwitch use ALL adapters defined in the management intent." } } } else { $retVal.Message += "`nPassed: Found one VMSwitch [ $($externalVMSwitch.Name) ] in the system that uses all physical adapters defined in the management intent." # VMSwitch uses same set of adapters defined in mgmt intent, will need to check there is a vNIC named as "vManagement(mgmtintentname)" $expectedMgmtNIC = Get-VMNetworkAdapter -ManagementOS -Name $mgmtVNicName -ErrorAction SilentlyContinue if ($expectedMgmtNIC.Count -ne 1) { $retVal.Pass = $false $retVal.Message += "`nFailed: Expected 1 vNIC with name [ $($mgmtVNicName) ] in the system. But found $($expectedMgmtNIC.Count)." } else { $retVal.Message += "`nPassed: Found one vNIC named as $mgmtVNicName in the system" } } } } [String[]] $adaptersToCheck = @() if (Get-NetAdapter -Name $mgmtVNicName -ErrorAction SilentlyContinue) { $adaptersToCheck = @($mgmtVNicName) } else { $adaptersToCheck = $MgmtAdapterNames } foreach ($currentAdapter in $adaptersToCheck) { # Adapter IP checking [PSObject[]] $currentAdapterAddresses = Get-NetIPAddress -InterfaceAlias $currentAdapter -AddressFamily IPv4 -PolicyStore ActiveStore -ErrorAction SilentlyContinue if ($currentAdapterAddresses.Count -ne 1) { $retVal.Pass = $false $retVal.Message += "`nFailed: Expected [ 1 ] IP address on management adapter $currentAdapter. Found [ $($currentAdapterAddresses.Count) ]." $retVal.Message += "`nIP addresses found: $($currentAdapterAddresses.IPAddress | Out-String)" } else { $retVal.Message += "`nPassed: Only 1 IP address found on management adapter $currentAdapter" $retVal.Message += "`nIP address found: $($currentAdapterAddresses.IPAddress | Out-String)" } } # Adapter DNS server checking. This check only be performed on the 1st adapter in the list as other adapters might not have valid IP address configured on it during the test [PSObject[]] $mgmtAdapterDNSClientServerAddresses = Get-DnsClientServerAddress -InterfaceAlias $adaptersToCheck[0] -AddressFamily IPv4 -ErrorAction SilentlyContinue if ($mgmtAdapterDNSClientServerAddresses -and ($mgmtAdapterDNSClientServerAddresses.Count -gt 0) -and ($mgmtAdapterDNSClientServerAddresses[0].ServerAddresses.Count -gt 0)) { $retVal.Message += "`nPassed: DNS client server addresses found on management adapter $($adaptersToCheck[0])" $retVal.Message += "`nIP address found: $($mgmtAdapterDNSClientServerAddresses.ServerAddresses | Out-String)" } else { $retVal.Pass = $false $retVal.Message += "`nFailed: Cannot find any DNS client server addresses on management adapter $($adaptersToCheck[0])" } return $retVal } foreach ($session in $PSSession) { #region Make sure session is there and open # This is needed here as the Test-MgmtIP test might break the session due to potential VMSwitch/vNIC creation if ($session.State -ne 'Opened') { Log-Info "Previous PSSession is closed. 
Opening new session for testing" if ($SessionCredential) { $sessionToCheck = New-PsSessionWithRetriesInternal -Node $session.ComputerName -Credential $SessionCredential } else { throw "Session is not opened and no credential provided" } } else { $sessionToCheck = $session } #endregion #region TEST1: Check inbox driver for all adapters in the host Log-Info "Check storage intent adapter inbox driver on $($sessionToCheck.ComputerName)" [System.String[]] $storageAdapters = $AtcHostIntents | Where-Object { $_.TrafficType.Contains("Storage") } | Select-Object -ExpandProperty Adapter if ($storageAdapters.Count -gt 0) { $tmpInboxDriverCheckRst = Invoke-Command -Session $sessionToCheck -ScriptBlock $checkInboxDriverScript -ArgumentList @(, $storageAdapters) if ($null -ne $tmpInboxDriverCheckRst) { Log-Info "Got inbox driver validation results from $($sessionToCheck.ComputerName)" $currentMachineInboxDriverTestStatus = if ($tmpInboxDriverCheckRst.Pass) { 'SUCCESS' } else { 'FAILURE' } $currentMachineInboxDriverTestDetailMessage = $tmpInboxDriverCheckRst.Message } else { # Should not come here, just a safe guard Log-Info "NO inbox driver validation results found from $($sessionToCheck.ComputerName)" $currentMachineInboxDriverTestStatus = 'FAILURE' $currentMachineInboxDriverTestDetailMessage = "NO inbox driver validation results returned from server $($session.ComputerName)" } $inboxDriverCheckRstObject = @{ Name = 'AzStackHci_Network_Test_AdapterDriver' Title = 'Test system adapter driver provider' DisplayName = 'Test system adapter driver provider' Severity = 'CRITICAL' Description = 'Checking adapter driver on {0}' -f $sessionToCheck.ComputerName Tags = @{} Remediation = 'Make sure adapter is not using inbox driver on host!' TargetResourceID = $sessionToCheck.ComputerName TargetResourceName = "AdapterDriver" TargetResourceType = 'AdapterDriver' Timestamp = [datetime]::UtcNow Status = $currentMachineInboxDriverTestStatus AdditionalData = @{ Source = $sessionToCheck.ComputerName Resource = 'AdapterDriver' Detail = $currentMachineInboxDriverTestDetailMessage Status = $currentMachineInboxDriverTestStatus TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $inboxDriverMgmtIPTestResults += New-AzStackHciResultObject @inboxDriverCheckRstObject } else { Log-Info "No storage adapter found in intent definition for host $($sessionToCheck.ComputerName). Skip inbox driver check for storage intent adapter." 
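            # Manual spot-check (illustrative, not part of the validation): inbox drivers can be identified
            # from the DriverProvider reported by Get-NetAdapter, e.g.:
            #   Get-NetAdapter -Physical | Select-Object Name, DriverProvider, DriverVersion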
} #endregion #region TEST2: Check no more than 1 IP address and DNS client server address on mgmt adapter Log-Info "Check IP address on mgmt adapter on $($sessionToCheck.ComputerName)" [PSObject[]] $mgmtIntent = $AtcHostIntents | Where-Object { $_.TrafficType.Contains("Management") } [System.String[]] $mgmtAdapters = $mgmtIntent[0].Adapter [System.String] $mgmtIntentName = $mgmtIntent[0].Name $tmpMgmtAdapterIPCheckRst = Invoke-Command -Session $sessionToCheck -ScriptBlock $checkMgmtAdapterScript -ArgumentList @($mgmtAdapters, $mgmtIntentName) if ($null -ne $tmpMgmtAdapterIPCheckRst) { Log-Info "Got mgmt adapter IP validation results from $($sessionToCheck.ComputerName)" $currentMachineMgmtIPTestStatus = if ($tmpMgmtAdapterIPCheckRst.Pass) { 'SUCCESS' } else { 'FAILURE' } $currentMachineMgmtIPTestDetailMessage = $tmpMgmtAdapterIPCheckRst.Message } else { # Should not come here, just a safe guard Log-Info "NO mgmt adapter IP validation results found from $($sessionToCheck.ComputerName)" $currentMachineMgmtIPTestStatus = 'FAILURE' $currentMachineMgmtIPTestDetailMessage = "NO mgmt adapter IP validation results returned from server $($session.ComputerName)" } $mgmtIPCheckRstObject = @{ Name = 'AzStackHci_Network_Test_MgmtIP' Title = 'Test system mgmt adapter to have only 1 valid IP assigned' DisplayName = 'Test system mgmt adapter to have only 1 valid IP assigned' Severity = 'CRITICAL' Description = 'Checking there is only 1 IP assigned to mgmt driver on {0}' -f $sessionToCheck.ComputerName Tags = @{} Remediation = 'Make sure mgmt adapter on host only have 1 valid IP assigned to each adapter and DNS server set correctly on the outbound adapter!' TargetResourceID = $sessionToCheck.ComputerName TargetResourceName = "MgmtAdapterIP" TargetResourceType = 'MgmtAdapterIP' Timestamp = [datetime]::UtcNow Status = $currentMachineMgmtIPTestStatus AdditionalData = @{ Source = $sessionToCheck.ComputerName Resource = 'MgmtAdapterIP' Detail = $currentMachineMgmtIPTestDetailMessage Status = $currentMachineMgmtIPTestStatus TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $inboxDriverMgmtIPTestResults += New-AzStackHciResultObject @mgmtIPCheckRstObject #endregion } return $inboxDriverMgmtIPTestResults } catch { throw $_ } } # Run during both Deployment and AddNode # 1. Mgmt NIC IP should not be overlapping with IP Pool # 2. Ensure Mgmt NIC IPs and IP Pool are in the same subnet function TestDHCPStatus { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = "Specify starting Management IP Range")] [System.Collections.ArrayList] $IpPools, [System.Management.Automation.Runspaces.PSSession[]] $PSSession, [pscredential] $SessionCredential ) try { $instanceResults = @() foreach ($ipPool in $IpPools) { $StartingAddress = $ipPool.StartingAddress $EndingAddress = $ipPool.EndingAddress foreach ($session in $PSSession) { $sb = { $env:COMPUTERNAME ( Get-NetIPConfiguration | Where-Object { $_.IPv4DefaultGateway -ne $null -and $_.NetAdapter.Status -eq "Up" } ).IPv4Address.IPAddress } if ($session.State -ne 'Opened') { Log-Info "Previous PSSession is closed. 
Opening new session for testing" if ($SessionCredential) { $currentSession = New-PsSessionWithRetriesInternal -Node $session.ComputerName -Credential $SessionCredential } else { throw "Session is not opened and no credential provided" } } else { $currentSession = $session } $NewNodeData = Invoke-Command $currentSession -ScriptBlock $sb $NodeName = $NewNodeData[0] # Check for all of the IPs found on the Host for ($i = 1; $i -lt $NewNodeData.count; $i++) { $NodeManagementIPAddress = $NewNodeData[$i] Log-Info "Node Name retrieved from session: $NodeName" Log-Info "Node Management IP Address retrieved from session: $NodeManagementIPAddress" # Check node management IP is not in infra pool range Log-Info "Starting Test Mgmt IP is not in Infra IP Pool for $($currentSession.ComputerName)" $ip = [system.net.ipaddress]::Parse($NodeManagementIPAddress).GetAddressBytes() [array]::Reverse($ip) $ip = [system.BitConverter]::ToUInt32($ip, 0) $from = [system.net.ipaddress]::Parse($StartingAddress).GetAddressBytes() [array]::Reverse($from) $from = [system.BitConverter]::ToUInt32($from, 0) $to = [system.net.ipaddress]::Parse($EndingAddress).GetAddressBytes() [array]::Reverse($to) $to = [system.BitConverter]::ToUInt32($to, 0) $mgmtIPOutsideRange = ($ip -le $from) -or ($ip -ge $to) if ($mgmtIPOutsideRange) { $TestMgmtIPInfraRangeDetail = $lnTxt.TestMgmtIPInfraRangePass -f $NodeManagementIPAddress, $StartingAddress, $EndingAddress } else { $TestMgmtIPInfraRangeDetail = $lnTxt.TestMgmtIPInfraRangeFail -f $NodeManagementIPAddress, $StartingAddress, $EndingAddress Log-Info $TestMgmtIPInfraRangeDetail -Type Warning } $status = if ($mgmtIPOutsideRange) { 'SUCCESS' } else { 'FAILURE' } $params = @{ Name = 'AzStackHci_Network_Test_New_DHCP_Validity_Infra_Pool' Title = 'Test DHCP Configuration Validity Mgmt IP Infra Pool' DisplayName = "Test DHCP Configuration Validity Mgmt IP Infra Pool" Severity = 'CRITICAL' Description = 'Checking Mgmt IPs are not in Infra IP Pool' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = "$StartingAddress-$EndingAddress" TargetResourceName = "DHCPDeploymentConfiguration" TargetResourceType = 'DHCPConfiguration' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = $currentSession.ComputerName Resource = 'DHCPNodeManagementIP' Detail = $TestMgmtIPInfraRangeDetail Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params $TestMgmtSubnet = TestMgmtSubnet -StartingAddress $NodeManagementIPAddress -EndingAddress $EndingAddress $status = if ($TestMgmtSubnet) { 'SUCCESS' } else { 'FAILURE' } $params = @{ Name = 'AzStackHci_Network_Test_New_DHCP_Validity_Infra_Subnet' Title = 'Test DHCP Configuration Validity Mgmt IP Infra Subnet' DisplayName = "Test DHCP Configuration Validity Mgmt IP Infra Subnet" Severity = 'CRITICAL' Description = 'Checking Mgmt IPs are in same subnet as infra IP Pool' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = "$StartingAddress-$EndingAddress" TargetResourceName = "DHCPDeploymentConfiguration" TargetResourceType = 'DHCPConfiguration' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = "$($currentSession.ComputerName)AndCustomerNetwork" Resource = 'DHCPNodeManagementIPAndCustomerSubnet' Detail = if ($TestMgmtSubnet) { 
$lnTxt.TestMgmtSubnetPass -f $NodeManagementIPAddress, $EndingAddress } else { $lnTxt.TestMgmtSubnetFail -f $NodeManagementIPAddress, $EndingAddress } Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } } } return $instanceResults } catch { throw $_ } } # Initial tests to determine if Mgmt IP of new Node is OK # Below Tests are for Static IP Allocation (Non-DHCP) function TestMgmtIPForNewNode { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Collections.ArrayList] $IpPools, [System.Management.Automation.Runspaces.PSSession[]] $PSSession, [Hashtable] $NodeToManagementIPMap, [PSObject[]] $AtcHostIntents ) try { $instanceResults = @() $AdditionalData = @() $newNodeSession = $PSSession[0] [PSObject[]] $mgmtIntent = $AtcHostIntents | Where-Object { $_.TrafficType.Contains("Management") } $intentName = $mgmtIntent[0].Name $firstAdapterName = $mgmtIntent[0].Adapter[0] $sb = { $env:COMPUTERNAME ( Get-NetIPConfiguration | Where-Object { $_.IPv4DefaultGateway -ne $null -and $_.NetAdapter.Status -eq "Up" } ).IPv4Address.IPAddress } $NewNodeData = Invoke-Command $newNodeSession -ScriptBlock $sb $NodeName = $NewNodeData[0] $NodeManagementIPAddress = $NewNodeData[1] Log-Info "Node Name retrieved from PSSession: $NodeName" Log-Info "Node Management IP Address retrieved from PSSession: $NodeManagementIPAddress" foreach ($ipPool in $IpPools) { $StartingAddress = $ipPool.StartingAddress $EndingAddress = $ipPool.EndingAddress # Check node management IP is not in infra pool range Log-Info "Starting Test Mgmt IP is not in Infra IP Pool for $($newNodeSession.ComputerName)" $ip = [system.net.ipaddress]::Parse($NodeManagementIPAddress).GetAddressBytes() [array]::Reverse($ip) $ip = [system.BitConverter]::ToUInt32($ip, 0) $from = [system.net.ipaddress]::Parse($StartingAddress).GetAddressBytes() [array]::Reverse($from) $from = [system.BitConverter]::ToUInt32($from, 0) $to = [system.net.ipaddress]::Parse($EndingAddress).GetAddressBytes() [array]::Reverse($to) $to = [system.BitConverter]::ToUInt32($to, 0) $mgmtIPOutsideRange = ($ip -le $from) -or ($ip -ge $to) if ($mgmtIPOutsideRange) { $TestMgmtIPInfraRangeDetail = $lnTxt.TestMgmtIPInfraRangePass -f $NodeManagementIPAddress, $StartingAddress, $EndingAddress $status = 'SUCCESS' } else { $TestMgmtIPInfraRangeDetail = $lnTxt.TestMgmtIPInfraRangeFail -f $NodeManagementIPAddress, $StartingAddress, $EndingAddress Log-Info $TestMgmtIPInfraRangeDetail -Type Warning $status = 'FAILURE' } $params = @{ Name = 'AzStackHci_Network_Test_New_Node_Validity_Outside_Mgmt_Range' Title = 'Test New Node Configuration Outside Management Range' DisplayName = "Test New Node Configuration Outside Management Range" Severity = 'CRITICAL' Description = 'Checking New Node IP' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = $NodeManagementIPAddress TargetResourceName = "IPAddress" TargetResourceType = 'IPAddress' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = $NodeName Resource = 'NewNodeManagementIP' Detail = $TestMgmtIPInfraRangeDetail Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } # Check that no management IPs are the same (Mgmt IP shouldn't conflict with existing node) Log-Info "Starting Test for No 
Mgmt IPs are the same for any Nodes" $duplicateIPs = $false $numDuplicates = $NodeToManagementIPMap.GetEnumerator() | Group-Object Value | ? { $_.Count -gt 1 } if ($numDuplicates -ne $null) { $duplicateIPs = $true Log-Info 'Duplicate IPs found for Node Management IPs' -Type Warning } if ($duplicateIPs) { $dtl = 'Duplicate IPs found for Node Management IPs' $status = 'FAILURE' } else { $dtl = 'No Duplicate IPs found for Node Management IPs' $status = 'SUCCESS' } $params = @{ Name = 'AzStackHci_Network_Test_New_Node_Validity_Duplicate_IP' Title = 'Test New Node Configuration Duplicate IP' DisplayName = "Test New Node Configuration Duplicate IP" Severity = 'CRITICAL' Description = 'Checking New Node IP is not a duplicate' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = $NodeManagementIPAddress TargetResourceName = "IPAddress" TargetResourceType = 'IPAddress' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = 'NodeAndManagementIPMapping' Resource = 'NodeManagementIPs' Detail = $dtl Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params # Check that host name exists, and the name and mgmt IP both match current node Log-Info "Starting Test to check if Mgmt IP is on a different node as $NodeName" Log-Info "Starting simultaneous Test to check if HostName and Mgmt IP Match for $NodeName" $ipOnAnotherNode = $false $NodeNameAndIPMatches = $false $nodeNameForIP = $null foreach ($NodeIP in $NodeToManagementIPMap.GetEnumerator()) { Write-Host "$($NodeIP.Name): $($NodeIP.Value)" if ($NodeIP.Name -eq $NodeName) { if ($NodeIP.Value -eq $NodeManagementIPAddress) { $NodeNameAndIPMatches = $true $nodeNameForIP = $NodeIP.Name } } else { if ($NodeIP.Value -eq $NodeManagementIPAddress) { $ipOnAnotherNode = $true $nodeNameForIP = $NodeIP.Name } } } if ($ipOnAnotherNode) { $CheckMgmtIPNotOnOtherNodeDetail = $lnTxt.CheckMgmtIPNotOnOtherNodeFail -f $NodeManagementIPAddress, $nodeNameForIP Log-Info $CheckMgmtIPNotOnOtherNodeDetail -Type Warning } else { $CheckMgmtIPNotOnOtherNodeDetail = $lnTxt.CheckMgmtIPNotOnOtherNodePass -f $NodeManagementIPAddress, $nodeNameForIP } $status = if ($ipOnAnotherNode) { 'FAILURE' } else { 'SUCCESS' } $params = @{ Name = 'AzStackHci_Network_Test_New_Node_Validity_IP_Conflict' Title = 'Test New Node Configuration Conflicting IP' DisplayName = "Test New Node Configuration Conflicting IP" Severity = 'CRITICAL' Description = 'Checking New Node IP is not on another node' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = $NodeManagementIPAddress TargetResourceName = "IPAddress" TargetResourceType = 'IPAddress' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = 'NodeAndManagementIPMapping' Resource = 'NodeNameAndManagementIP' Detail = $CheckMgmtIPNotOnOtherNodeDetail Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params if ($NodeNameAndIPMatches) { $CheckMgmtIPOnNewNodeDetail = $lnTxt.CheckMgmtIPOnNewNodePass -f $NodeManagementIPAddress, $nodeNameForIP $status = 'SUCCESS' } else { $CheckMgmtIPOnNewNodeDetail = $lnTxt.CheckMgmtIPOnNewNodeFail -f $NodeManagementIPAddress, $nodeNameForIP Log-Info $CheckMgmtIPOnNewNodeDetail -Type Warning $status = 'FAILURE' } $params 
= @{ Name = 'AzStackHci_Network_Test_New_Node_And_IP_Match' Title = 'Test New Node Configuration Name and IP Match' DisplayName = "Test New Node Configuration Name and IP Match" Severity = 'CRITICAL' Description = 'Checking New Node Name and IP match' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-prerequisites#network-requirements' TargetResourceID = $NodeManagementIPAddress TargetResourceName = "IPAddress" TargetResourceType = 'IPAddress' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = 'NodeAndManagementIPMapping' Resource = 'NewNodeNameAndManagementIP' Detail = $CheckMgmtIPOnNewNodeDetail Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params # Check that New Node has the first physical adapter and the physical adapter has the mgmt IP Log-Info "Starting Test to see if $firstAdapterName on $NodeName has the correct Mgmt IP" $adapterSB = { param($adapterName) $returnDict = @{} $returnDict["GetNetIPAddressOutput"] = Get-NetIPAddress $returnDict["GetNetAdapterOutput"] = Get-NetAdapter $AdapterIPObject = Get-NetIPAddress -InterfaceAlias $adapterName -AddressFamily IPv4 -ErrorAction SilentlyContinue if ($AdapterIPObject -eq $null) { $returnDict["Result"] = $false $returnDict["AdapterName"] = $adapterName return $returnDict } $returnDict["Result"] = $true $returnDict["AdapterName"] = $adapterName $returnDict["AdapterIP"] = $AdapterIPObject.IPAddress return $returnDict } $AdapterContainsMgmtIP = $false $physicalAdapterExists = $false $VirtualNICName = "vManagement($intentName)" try { $NewNodeAdapterData = Invoke-Command $newNodeSession -ScriptBlock $adapterSB -ArgumentList $firstAdapterName Log-Info "Data found for New Node Adapter ($firstAdapterName): $($NewNodeAdapterData | Out-String)" if ($NewNodeAdapterData['Result'] -eq $false) { Log-Info "Physical Adapter Not Found" Log-Info "Get-NetIPAddress output: $($NewNodeAdapterData['GetNetIPAddressOutput'] | Out-String)" Log-Info "Get-NetAdapter output: $($NewNodeAdapterData['GetNetAdapterOutput'] | Out-String)" } elseif ($NewNodeAdapterData['Result'] -eq $true -and $NewNodeAdapterData['AdapterIP'] -eq $NodeManagementIPAddress) { Log-Info "Physical Adapter found with Correct IP: $($NewNodeAdapterData['AdapterIP'] | Out-String)" $physicalAdapterExists = $true $AdapterContainsMgmtIP = $true $CheckAdapterContainsIPDetail = $lnTxt.CheckAdapterContainsIPPass -f $firstAdapterName, $NodeManagementIPAddress } else { Log-Info "Physical Adapter found but with incorrect IP" Log-Info "Get-NetIPAddress output: $($NewNodeAdapterData['GetNetIPAddressOutput'] | Out-String)" Log-Info "Get-NetAdapter output: $($NewNodeAdapterData['GetNetAdapterOutput'] | Out-String)" } # In certain cases, new node will be set up with the vNIC instead and need to check that for mgmt IP if (!$physicalAdapterExists) { Log-Info "Physical Adapter does not exist or mgmt IP is wrong. 
Checking Virtual Adapter" -Type Warning $NewNodeVirtualAdapterData = Invoke-Command $newNodeSession -ScriptBlock $adapterSB -ArgumentList $VirtualNICName Log-Info "Data found for New Node Virtual Adapter ($VirtualNICName): $($NewNodeVirtualAdapterData | Out-String)" if ($NewNodeVirtualAdapterData['Result'] -eq $false) { Log-Info "Virtual Adapter Not Found" Log-Info "Get-NetIPAddress output: $($NewNodeVirtualAdapterData['GetNetIPAddressOutput'] | Out-String)" Log-Info "Get-NetAdapter output: $($NewNodeVirtualAdapterData['GetNetAdapterOutput'] | Out-String)" } elseif ($NewNodeVirtualAdapterData['Result'] -eq $true -and $NewNodeVirtualAdapterData['AdapterIP'] -eq $NodeManagementIPAddress) { Log-Info "Virtual Adapter found with Correct IP: $($NewNodeVirtualAdapterData['AdapterIP'] | Out-String)" $AdapterContainsMgmtIP = $true $CheckAdapterContainsIPDetail = $lnTxt.CheckAdapterContainsIPPass -f $VirtualNICName, $NodeManagementIPAddress } else { Log-Info "Virtual Adapter found but with incorrect IP" Log-Info "Get-NetIPAddress output: $($NewNodeVirtualAdapterData['GetNetIPAddressOutput'] | Out-String)" Log-Info "Get-NetAdapter output: $($NewNodeVirtualAdapterData['GetNetAdapterOutput'] | Out-String)" } } } catch { Log-Info "Exception thrown when checking New Node Adapter: $_" -Type Warning } if (!$AdapterContainsMgmtIP) { $CheckAdapterContainsIPDetail = $lnTxt.CheckAdapterContainsIPFail -f $firstAdapterName, $VirtualNICName, $NodeManagementIPAddress Log-Info $CheckAdapterContainsIPDetail -Type Warning $status = 'FAILURE' } else { $status = 'SUCCESS' } $params = @{ Name = 'AzStackHci_Network_Test_New_Node_First_Adapter_Validity' Title = 'Test New Node Configuration First Network Adapter has Mgmt IP' DisplayName = "Test New Node Configuration First Network Adapter has Mgmt IP" Severity = 'CRITICAL' Description = 'Checking New Node first adapter has mgmt IP' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-checklist' TargetResourceID = $NodeManagementIPAddress TargetResourceName = $firstAdapterName TargetResourceType = 'Network Adapter' Timestamp = [datetime]::UtcNow Status = $status AdditionalData = @{ Source = 'NewNodeAdapter' Resource = 'NewNodeAdapterIP' Detail = $CheckAdapterContainsIPDetail Status = $status TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params return $instanceResults } catch { throw $_ } } function TestMgmtSubnet { <# .SYNOPSIS Ensure Start and End IPs are on the same subnet. #> param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Net.IPAddress] $StartingAddress, [Parameter(Mandatory = $false, HelpMessage = "Specify end Management IP Range")] [System.Net.IPAddress] $EndingAddress ) try { $start = $StartingAddress -replace "\.[0-9]{1,3}$", "" $end = $EndingAddress -replace "\.[0-9]{1,3}$", "" if ($start -eq $end) { Log-info "Subnet start: $start and end: $end" return $true } else { return $false } } catch { throw "Failed to check subnet. Error: $_" } } function TestMgmtIpPools { <# .SYNOPSIS Ensure all ip are in management subnet. 
#> param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Collections.ArrayList] $IpPools, [Parameter(Mandatory = $false, HelpMessage = "Specify Management Subnet")] [string] $ManagementSubnetValue ) try { $allIps = GetMgmtIpRangeFromPools -IpPools $IpPools $uniqueIPs = @{} $firstIp = $IpPools[0].StartingAddress $match = $firstIp -replace "\.[0-9]{1,3}$", "" foreach ($ip in $allIps) { $ipString = $ip.ToString() if ($uniqueIPs.ContainsKey($ipString)) { return $false } else { $uniqueIPs[$ipString] = $true } # Test to make sure all ips in the management subnet in the DHCP scenario $toMatch = $ip -replace "\.[0-9]{1,3}$", "" if ($toMatch -ne $match) { return $false } } # More reliable test to make sure all ips in the management pool in non-DHCP scenarios if (-not ([string]::IsNullOrEmpty($ManagementSubnetValue))) { foreach ($ipPool in $IpPools) { $StartingAddress = $ipPool.StartingAddress $EndingAddress = $ipPool.EndingAddress if (!(Check-IPInRange -IPAddress $StartingAddress -Range $ManagementSubnetValue)) { return $false } if (!(Check-IPInRange -IPAddress $EndingAddress -Range $ManagementSubnetValue)) { return $false } } } return $true } catch { throw "Failed to check ip pools. Error: $_" } } function Check-IPInRange { param( [Parameter(Mandatory=$true)] [string] $IPAddress, # Range in which to search using CIDR notation. (ippaddr/bits) [Parameter(Mandatory=$true)] [string] $Range ) # Split range into the address and the CIDR notation [String]$CIDRAddress = $Range.Split('/')[0] [int]$CIDRBits = $Range.Split('/')[1] # Address from range and the search address are converted to Int32 and the full mask is calculated from the CIDR notation. [int]$BaseAddress = [System.BitConverter]::ToInt32((([System.Net.IPAddress]::Parse($CIDRAddress)).GetAddressBytes()), 0) [int]$Address = [System.BitConverter]::ToInt32(([System.Net.IPAddress]::Parse($IPAddress).GetAddressBytes()), 0) [int]$Mask = [System.Net.IPAddress]::HostToNetworkOrder(-1 -shl ( 32 - $CIDRBits)) return (($BaseAddress -band $Mask) -eq ($Address -band $Mask)) } function GetMgmtIpRangeFromPools { param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Collections.ArrayList] $IpPools ) $result = @() foreach ($ipPool in $IpPools) { $result += GetMgmtIpRange -StartingAddress $ipPool.StartingAddress -EndingAddress $ipPool.EndingAddress } return $result } function GetMgmtIpRange { param ( [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")] [System.Net.IPAddress] $StartingAddress, [Parameter(Mandatory = $false, HelpMessage = "Specify end Management IP Range")] [System.Net.IPAddress] $EndingAddress ) try { $first3 = $StartingAddress -replace "\.[0-9]{1,3}$", "" $start = $StartingAddress -split "\." | Select-Object -Last 1 $end = $EndingAddress -split "\." | Select-Object -Last 1 $range = $start..$end | ForEach-Object { ([System.Net.IPAddress]("{0}.{1}" -f $first3, $PSITEM)).IPAddressToString } Log-info "Start: $start and end: $end gives range: $($range -join ',')" return $range } catch { throw "Failed to get Mgmt range. Error: $_" } } function TestMgmtRangeSize { <# .SYNOPSIS Ensure IP range is within boundaries. 
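    .EXAMPLE
        # Illustrative sketch only; the pool below is hypothetical.
        $pools = [System.Collections.ArrayList]@(
            @{ StartingAddress = '10.0.0.10'; EndingAddress = '10.0.0.17' }
        )
        TestMgmtRangeSize -IpPools $pools -Minimum 5 -Maximum 16
        # Returns $true: the pool spans 8 addresses (last octets 10..17), which is within the 5-16 boundary.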
#>
    param (
        [Parameter(Mandatory = $false, HelpMessage = "Specify starting Management IP Range")]
        [System.Collections.ArrayList] $IpPools,
        [int] $Minimum = 5,
        [int] $Maximum = 16
    )
    try {
        $totalCount = 0
        foreach ($ipPool in $IpPools) {
            $StartingAddress = $ipPool.StartingAddress
            $EndingAddress = $ipPool.EndingAddress
            $start = $StartingAddress -split "\." | Select-Object -Last 1
            $end = $EndingAddress -split "\." | Select-Object -Last 1
            $hostCount = ($start..$end).Count
            Log-Info "Start: $start and end: $end gives host count: $hostCount"
            $totalCount += $hostCount
        }
        if ($totalCount -gt $Maximum -or $totalCount -lt $Minimum) { return $false } else { return $true }
    }
    catch {
        throw "Failed to check range size. Error: $_"
    }
}

function IsTcpPortInUse {
    param(
        [System.Net.IPAddress] $Ip,
        [int] $Port = 5986,
        [int] $Timeout = 500
    )
    try {
        $tcpClient = New-Object System.Net.Sockets.TcpClient
        # Wait() returns $true when the connection completes within the timeout, i.e. the port is in use.
        $portOpened = $tcpClient.ConnectAsync($Ip, $Port).Wait($Timeout)
        $tcpClient.Dispose()
        return $portOpened
    }
    catch {
        throw "Failed to check TCP ports. Error: $_"
    }
}

function TestNetworkIntentStatus {
    <#
    .SYNOPSIS
        This test is run in the AddNode context only. It validates that the intents configured on the
        existing cluster and on the new node to be added are not in an errored state.
    .DESCRIPTION
        This test performs the following validations:
        1) Check that the ATC intent status on the existing nodes shows successful configuration and provisioning.
        2) Check that the NetworkATC service is running on the new node.
        3) Check that the existing nodes have a storage intent configured.
    .PARAMETER PSSession
        [System.Management.Automation.Runspaces.PSSession[]] Session(s) to the new node; the first session is used.
    #>
    [CmdletBinding()]
    param (
        [System.Management.Automation.Runspaces.PSSession[]] $PSSession
    )
    try {
        $sessionToCheck = $PSSession[0]
        Log-Info "Checking ATC Intent status on existing nodes and if NetworkATC service is running on the new node."
        $instanceResults = @()
        $AdditionalData = @()
        # Get the names of all nodes with an Up status
        $activeNodes = (Get-ClusterNode | Where-Object { $_.State -eq "Up" }).Name
        Log-Info "Active nodes: $($activeNodes | Out-String)"
        # Get all intents on the active nodes
        $intents = Get-NetIntentStatus | Where-Object { $activeNodes -contains $_.Host }
        # Check the intent status on the existing nodes.
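        # For reference, a healthy record from Get-NetIntentStatus is expected to look roughly like
        # this (illustrative values; only the properties used below are shown):
        #   Host                : NODE01
        #   ConfigurationStatus : Success
        #   ProvisioningStatus  : Completed
        # Any other combination is reported as a FAILURE result by the loop below.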
foreach ($intent in $intents) { $intentHealthy = $true if ($intent.ConfigurationStatus -ne "Success" -or $intent.ProvisioningStatus -ne "Completed") { $intentHealthy = $false $TestNetworkIntentStatusDetail = $lnTxt.TestNetworkIntentStatusFail -f $intent.Host, $intent.ConfigurationStatus, $intent.ProvisioningStatus Log-Info $TestNetworkIntentStatusDetail -Type Warning } else { $intentHealthy = $true $TestNetworkIntentStatusDetail = $lnTxt.TestNetworkIntentStatusPass -f $intent.Host, $intent.ConfigurationStatus, $intent.ProvisioningStatus } $params = @{ Name = 'AzStackHci_Network_Test_Network_AddNode_Intent_Status' Title = 'Test Network intent on existing nodes' DisplayName = 'Test Network intent on existing nodes' Severity = 'CRITICAL' Description = 'Checking if Network intent is unhealthy on existing nodes' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-checklist' TargetResourceID = 'NetworkIntent' TargetResourceName = 'NetworkIntent' TargetResourceType = 'NetworkIntent' Timestamp = [datetime]::UtcNow Status = if ($intentHealthy) { 'SUCCESS' } else { 'FAILURE' } AdditionalData = @{ Source = $intent.Host Resource = 'AddNodeIntentStatusCheck' Detail = $TestNetworkIntentStatusDetail Status = if ($intentHealthy) { 'SUCCESS' } else { 'FAILURE' } TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params } Log-Info "Checking if the storage intent is configured on the existing cluster before add node." $storageIntent = $intents | Where-Object {$_.IsStorageIntentSet -eq $true} try { $source = Get-Cluster } catch { $source = $Env:COMPUTERNAME Log-Info "Error getting the cluster, we could be running this test in standalone mode on $($source)" } if ($null -eq $storageIntent) { $TestNetworkIntentStatusDetail = $lnTxt.TestStorageIntentNotConfigured -f $source Log-Info $TestNetworkIntentStatusDetail -Type Warning } else { $TestNetworkIntentStatusDetail = $lnTxt.TestStorageIntentConfigured -f $source Log-Info $TestNetworkIntentStatusDetail -Type Success } $params = @{ Name = 'AzStackHci_Network_Test_Network_AddNode_Storage_Intent' Title = 'Test Storage intent on existing nodes' DisplayName = 'Test Storage intent on existing nodes' Severity = 'CRITICAL' Description = 'Check if the storage intent is configured on the existing cluster before add node' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-checklist' TargetResourceID = 'StorageIntent' TargetResourceName = 'StorageIntent' TargetResourceType = 'StorageIntent' Timestamp = [datetime]::UtcNow Status = if ($null -eq $storageIntent) { 'FAILURE' } else { 'SUCCESS' } AdditionalData = @{ Source = $source Resource = 'AddNodeStorageIntentCheck' Detail = $TestNetworkIntentStatusDetail Status = if ($null -eq $storageIntent) { 'FAILURE' } else { 'SUCCESS' } TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params # Check if NetworkATC service is running on the new node $sb = { $retVal = New-Object psobject -Property @{ Pass = $true Status = [string]::Empty } $atcFeature = Get-WindowsFeature -Name NetworkATC if ($atcFeature.Installstate -eq "Installed") { $atcService = Get-Service NetworkATC -ErrorAction SilentlyContinue $retVal.Status = "Feature Installed Service $($atcService.Status)" } elseif ($atcFeature.Installstate -eq "Available") { $retVal.Status = "Feature Available" } else { $retVal.Pass = $false } return $retVal } 
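        # The scriptblock above returns a small status object that is interpreted below.
        # Possible shapes (derived from the assignments in the scriptblock):
        #   Pass = $true,  Status = 'Feature Installed Service Running' -> healthy
        #   Pass = $true,  Status = 'Feature Installed Service Stopped' -> feature installed but service not running
        #   Pass = $true,  Status = 'Feature Available'                 -> acceptable; feature present but not installed
        #   Pass = $false, Status = ''                                  -> NetworkATC feature missing from the system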
$NetworkATCStatus = Invoke-Command $sessionToCheck -ScriptBlock $sb $ATCStatusHealthy = $true if (!$NetworkATCStatus.Pass) { # NetworkATC feature not Installed, not Available on the system $ATCStatusHealthy = $false $TestNetworkATCServiceDetail = $lnTxt.TestNetworkATCFeatureNotInSystem -f $sessionToCheck.ComputerName Log-Info $TestNetworkATCServiceDetail -Type Warning } elseif (-not (($NetworkATCStatus.Status -eq 'Feature Installed Service Running') -or ($NetworkATCStatus.Status -eq 'Feature Available'))) { # NetworkATC feature installed but service not 'Running', or feature not available $ATCStatusHealthy = $false $TestNetworkATCServiceDetail = $lnTxt.TestNetworkATCFeatureServiceStatus -f $NetworkATCStatus.Status, $sessionToCheck.ComputerName Log-Info $TestNetworkATCServiceDetail -Type Warning } else { $ATCStatusHealthy = $true $TestNetworkATCServiceDetail = $lnTxt.TestNetworkATCFeatureServiceStatus -f $NetworkATCStatus.Status, $sessionToCheck.ComputerName Log-Info $TestNetworkATCServiceDetail -Type Success } $params = @{ Name = 'AzStackHci_Network_Test_Network_AddNode_NetworkATC_Service' Title = 'Test NetworkATC service is running on new node' DisplayName = 'Test NetworkATC service is running on new node' Severity = 'CRITICAL' Description = 'Check NetworkATC service is running on new node' Tags = @{} Remediation = 'https://learn.microsoft.com/en-us/azure-stack/hci/deploy/deployment-tool-checklist' TargetResourceID = 'NetworkATCService' TargetResourceName = 'NetworkATCService' TargetResourceType = 'NetworkATCService' Timestamp = [datetime]::UtcNow Status = if ($ATCStatusHealthy) { 'SUCCESS' } else { 'FAILURE' } AdditionalData = @{ Source = $sessionToCheck.ComputerName Resource = 'AddNodeNewNodeNetworkATCServiceCheck' Detail = $TestNetworkATCServiceDetail Status = if ($ATCStatusHealthy) { 'SUCCESS' } else { 'FAILURE' } TimeStamp = [datetime]::UtcNow } HealthCheckSource = $ENV:EnvChkrId } $instanceResults += New-AzStackHciResultObject @params return $instanceResults } catch { throw $_ } } function CheckNetAdapterRDMAStatus { param ( [PSObject[]] $IntentsInfoFromJson ) $retVal = New-Object psobject -Property @{ Pass = $true Message = "on $($ENV:COMPUTERNAME)" } enum NetworkDirectEnabledState { Disabled = 0; Enabled = 1 } # Read RDMA state info for all adapters [PSObject[]] $allAdapterRdmaInfo = Get-NetAdapterRdma [System.Boolean] $validSystemRdmaConfig = $true # need to check each adapter for all intents foreach ($currentIntent in $IntentsInfoFromJson) { [System.String[]] $adaptersToCheck = $currentIntent.Adapter $rdmaInfoForAdaptersToCheck = $allAdapterRdmaInfo | Where-Object { $_.Name -in $adaptersToCheck } [Boolean] $currentIntentAdapterOverride = $currentIntent.OverrideAdapterProperty [System.Int32] $currentIntentNetworkDirectOverride = 0 if (-Not [System.String]::IsNullOrEmpty($currentIntent.AdapterPropertyOverrides.NetworkDirect)) { $currentIntentNetworkDirectOverride = [System.Int32] [NetworkDirectEnabledState] $currentIntent.AdapterPropertyOverrides.NetworkDirect } $retVal.Message += "`n Intent $($currentIntent.Name) Adapter Override -> $currentIntentAdapterOverride; NetworkDirect -> $currentIntentNetworkDirectOverride" foreach ($currentRdmaInfo in $rdmaInfoForAdaptersToCheck) { # The following conditions are valid for RDMA configuration: # RDMA Enabled | RDMA OperationalStatus | Override | OverrideValue # True | True | - | - # - | False | True | 0 # False | False | - | - (mgmt intent withouth storage intent) $rdmaEnabled = $currentRdmaInfo.Enabled $rdmaOperationalState = 
$currentRdmaInfo.OperationalState $validRdmaForCurrentAdapter = ($rdmaEnabled -and $rdmaOperationalState) -or ((-not $rdmaOperationalState) -and $currentIntentAdapterOverride -and $currentIntentNetworkDirectOverride -eq 0) -or ((-not $rdmaEnabled) -and (-not $rdmaOperationalState) -and ($currentIntent.TrafficType.Contains("Management")) -and (-not $currentIntent.TrafficType.Contains("Storage"))) if (-not $validRdmaForCurrentAdapter) { $retVal.Message += "`n Wrong configuration for adapter $($currentRdmaInfo.Name): RDMA Enabled -> $rdmaEnabled; RDMA OperationalState -> $rdmaOperationalState" } else { $retVal.Message += "`n Correct configuration for adapter $($currentRdmaInfo.Name): RDMA Enabled -> $rdmaEnabled, RDMA OperationalState -> $rdmaOperationalState" } $validSystemRdmaConfig = $validSystemRdmaConfig -and $validRdmaForCurrentAdapter } } if (-not $validSystemRdmaConfig) { $retVal.Pass = $false $retVal.Message = "`nERROR: RDMA setting on adapters are invalid " + $retVal.Message } else { $retVal.Pass = $true $retVal.Message = "`nPASS: RDMA setting on adapters are valid " + $retVal.Message } return $retVal } function CheckAdapterSymmetryAndBandwidth { param ( [PSObject[]] $IntentsInfoFromJson ) enum NetworkDirectEnabledState { Disabled = 0; Enabled = 1 } $nodeName = $env:COMPUTERNAME $retVal = New-Object psobject -Property @{ Pass = $true Message = "on $($nodeName)`n" } [PSObject[]] $allAdapterInfo = Get-NetAdapter foreach ($currentIntent in $IntentsInfoFromJson) { [System.String[]] $adaptersToCheck = $currentIntent.Adapter $intentAdapterInfoToCheck = $allAdapterInfo | Where-Object { $_.Name -in $adaptersToCheck } # Check adapter symmetry $retVal.Message += "`n--> Adapter Symmetry Check: Link speed and Component ID should be same for all adapters in the intent" $compIDFail = $false $linkSpeedFail = $false $expectedSpeed = $null $expectedComponentID = $null foreach ($nicInfo in $intentAdapterInfoToCheck) { if ($null -eq $expectedSpeed) { $expectedSpeed = $nicInfo.Speed } if ($null -eq $expectedComponentID) { $expectedComponentID = $nicInfo.ComponentID } if ($expectedSpeed -ne $nicInfo.Speed) { $linkSpeedFail = $true } if ($expectedComponentID -ne $nicInfo.ComponentID) { $compIDFail = $true } if ($linkSpeedFail -Or $compIDFail) { $retVal.Pass = $false } $retVal.Message += "`n -- $nodeName ($($nicInfo.Name),`t$($nicInfo.LinkSpeed),`t$($nicInfo.ComponentID))" } # Check adapter bandwidth # This is needed if current intent is for storage traffic and adapter property is not overridden with NetworkDirect Disabled [Boolean] $currentIntentAdapterOverride = $currentIntent.OverrideAdapterProperty [System.Int32] $currentIntentNetworkDirectOverride = 0 if (-Not [System.String]::IsNullOrEmpty($currentIntent.AdapterPropertyOverrides.NetworkDirect)) { $currentIntentNetworkDirectOverride = [System.Int32] [NetworkDirectEnabledState] $currentIntent.AdapterPropertyOverrides.NetworkDirect } $needCheckBandwidth = $currentIntent.TrafficType.Contains("Storage") -and (-not $currentIntentAdapterOverride -or $currentIntentNetworkDirectOverride -ne 0) if ($needCheckBandwidth) { $retVal.Message += "`n--> Adapter Bandwidth Check for storage adapters when RDMA enabled: Need to be 10Gbps or higher" foreach ($nicInfo in $intentAdapterInfoToCheck) { if ($nicInfo.Speed) { if ($nicInfo.Speed -lt 10000000000) { $retVal.Pass = $false } $retVal.Message += "`n -- $nodeName ($($nicInfo.Name),`t$($nicInfo.LinkSpeed))" } else { $retVal.Pass = $false $retVal.Message += "`n -- $nodeName ($($nicInfo.Name), Speed not available)" } } } 
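            # Note: $nicInfo.Speed from Get-NetAdapter is expressed in bits per second, so the
            # 10000000000 threshold used above corresponds to the 10 Gbps minimum called out in the message.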
} if ($retVal.Pass) { $retVal.Message = "`nPASS: Network adapter(s) are symmetric and meet bandwidth requirement " + $retVal.Message } else { $retVal.Message = "`nERROR: Network adapter(s) are not symmetric or do not meet bandwidth requirement " + $retVal.Message } return $retVal } function CheckHostNetworkConfigurationReadiness { param ( [PSObject[]] $IntentsInfoFromJson ) $retVal = New-Object psobject -Property @{ Pass = $true Message = "On $($ENV:COMPUTERNAME):" } [System.String[]] $intentAdapters = $IntentsInfoFromJson | ForEach-Object { $_.Adapter } | Select-Object -Unique [PSObject[]] $extSwitchInfo = @() if ((Get-Command Get-VMSwitch -ErrorAction SilentlyContinue) -and (Get-WindowsFeature -Name Hyper-V -ErrorAction SilentlyContinue).Installed) { $extSwitchInfo = Get-VMSwitch -SwitchType External } [System.String] $interimPassMessage = "" #region Check DNS client configuration [PSObject[]] $adapterDnsClientInfo = Get-DNSClient [System.String[]] $adpaterWithDNSClientInfo = $adapterDnsClientInfo.InterfaceAlias | Select-Object -Unique [System.String[]] $adaptersToCheck = @() if ($extSwitchInfo.Count -eq 0) { # In case there is no VMSwitch in the system, we will need to make sure all adapters used in intents are in the result of Get-DNSClient $adaptersToCheck = $intentAdapters } else { # if there is a VMSwitch, we will need to make sure that those adapters not in VMSwitch but in intent are in the result of Get-DNSClient [System.Guid[]] $switchAdapterGuids = $extSwitchInfo | ForEach-Object { $_.NetAdapterInterfaceGuid } [System.String[]] $adaptersNotInVMSwitchNames = Get-NetAdapter -Physical | Where-Object { $_.InterfaceGuid -notin $switchAdapterGuids } | ForEach-Object { $_.Name } $adaptersToCheck = $intentAdapters | Where-Object { $_ -in $adaptersNotInVMSwitchNames } } if ($adaptersToCheck.Count -eq 0) { #This means all the adapters defined in intent are used in VMSwitch $intentAdapterMissingDnsClient = $null } else { $intentAdapterMissingDnsClient = Compare-Object $adaptersToCheck $adpaterWithDNSClientInfo | Where-Object { $_.SideIndicator -eq "<=" } | ForEach-Object { $_.InputObject } } if ($intentAdapterMissingDnsClient.Count -gt 0) { $retVal.Pass = $false $retVal.Message += "`nERROR: DNS Client configuration is missing for the following adapter(s): $($intentAdapterMissingDnsClient -join ', ')" # in case of failure, return directly return $retVal } else { $interimPassMessage += "`nPASS: DNS Client configuration includes info for all adapters defined in intent" } #endregion #region Check Hyper-V running status by calling Get-VMHost if ((Get-Command Get-VMHost -ErrorAction SilentlyContinue) -and (Get-WindowsFeature -Name Hyper-V -ErrorAction SilentlyContinue).Installed) { [PSObject[]] $vmHostInfo = Get-VMHost -ErrorAction SilentlyContinue if ($vmHostInfo.Count -eq 0) { $retVal.Pass = $false $retVal.Message += "`nERROR: Hyper-V is not running correctly on the system" # in case of failure, return directly return $retVal } else { $interimPassMessage += "`nPASS: Hyper-V is running correctly on the system" } } else { $interimPassMessage += "`nWARNING: Hyper-V-PowerShell might not installed correctly on the system. Will skip VM host check." 
} #endregion #region Check VMSwitch readiness # At leas 1 VMSwitch is having the network adapter defined in the management intent # Or management intent adapters are not included in any VMSwitch if ($extSwitchInfo.Count -ge 1) { [System.String[]] $mgmtIntentAdapterNames = $IntentsInfoFromJson | Where-Object { $_.TrafficType.Contains("Management") } | ForEach-Object { $_.Adapter } | Select-Object -Unique [System.Boolean] $foundMgmtVMSwitch = $false foreach ($currentSwitchInfo in $extSwitchInfo) { [System.Guid[]] $currentSwitchAdapterGuids = $currentSwitchInfo | ForEach-Object { $_.NetAdapterInterfaceGuid } [System.String[]] $currentSwitchAdapterNames = Get-NetAdapter -Physical | Where-Object { $_.InterfaceGuid -in $currentSwitchAdapterGuids } | ForEach-Object { $_.Name } $tempRst = Compare-Object $mgmtIntentAdapterNames $currentSwitchAdapterNames | Where-Object { $_.SideIndicator -eq "<=" } | ForEach-Object { $_.InputObject } if ($tempRst.Count -eq 0) { $foundMgmtVMSwitch = $true break } } if ($foundMgmtVMSwitch) { $interimPassMessage += "`nPASS: At least 1 VMSwitch is having the network adapter defined in the management intent" } else { $retVal.Pass = $false $retVal.Message += "`nERROR: No VMSwitch is having the network adapter defined in the management intent" # in case of failure, return directly return $retVal } } #endregion #Region Check advanced property VlanId on adapters foreach ($pNIC in $intentAdapters) { $currentAdapterAdvancedPropertyVlanId = Get-NetAdapterAdvancedProperty -Name $pNIC -RegistryKeyword VlanId -ErrorAction SilentlyContinue if ($null -eq $currentAdapterAdvancedPropertyVlanId) { $retVal.Pass = $false $retVal.Message += "`nERROR: Cannot find advanced property VlanId for adapter $pNIC" # in case of failure, return directly return $retVal } } $interimPassMessage += "`nPASS: Advanced property VlanId for all adapters defined in intent are correct" #endregion #region Check pNIC are in the intent adapters [System.String[]] $allpNicInSystem = Get-NetAdapter -Physical -Name $intentAdapters -ErrorAction SilentlyContinue | ForEach-Object { $_.Name } $adapterCompareResult = Compare-Object $intentAdapters $allpNicInSystem | Where-Object { $_.SideIndicator -eq "<=" } | ForEach-Object { $_.InputObject } if ($adapterCompareResult.Count -gt 0) { $retVal.Pass = $false $retVal.Message += "`nERROR: The following adapter(s) are not physical adapter in the system: $($adapterCompareResult -join ', '). Intent adapters should be physical adapters in the system." 
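        # (Compare-Object's '<=' side indicator above selects intent adapter names that were not returned
        #  by Get-NetAdapter -Physical, i.e. names that do not correspond to a physical NIC on this host.)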
# in case of failure, return directly return $retVal } else { $interimPassMessage += "`nPASS: All adapters defined in intent are physical NICs" } #endregion $retVal.Message += $interimPassMessage return $retVal } function ConfigureVMSwitchForTesting { [CmdletBinding()] param ( [System.String[]] $MgmtAdapterNames, [System.String] $MgmtIntentName ) [PSObject] $retVal = New-Object PSObject -Property @{ VMSwitchInfo = $null MgmtVlanId = 0 NeedCleanUp = $false IPReady = $false } $mgmtVlanId = 0 $existingPNICVlanId = Get-NetAdapterAdvancedProperty -RegistryKeyword VlanID -Name $MgmtAdapterNames[0] -ErrorAction SilentlyContinue if ($existingPNICVlanId -and $existingPNICVlanId.RegistryValue) { $mgmtVlanId = $existingPNICVlanId.RegistryValue[0] } $expectedVMSwitchName = "ConvergedSwitch($($MgmtIntentName))" $expectedMgmtVNicName = "vManagement($($MgmtIntentName))" $tmpVMSwitch = New-VMSwitch -Name $expectedVMSwitchName -NetAdapterName $MgmtAdapterNames -EnableEmbeddedTeaming $true -AllowManagementOS $true if ($tmpVMSwitch) { $retVal.VMSwitchInfo = $tmpVMSwitch $retVal.MgmtVlanId = $mgmtVlanId $retVal.NeedCleanUp = $true Rename-VMNetworkAdapter -ManagementOS -Name $expectedVMSwitchName -NewName $expectedMgmtVNicName Get-NetAdapter -name "vEthernet ($($expectedMgmtVNicName))" -ErrorAction SilentlyContinue | Rename-NetAdapter -NewName $expectedMgmtVNicName if ($mgmtVlanId -ne 0) { Set-VMNetworkAdapterIsolation -ManagementOS ` -VMNetworkAdapterName $expectedMgmtVNicName ` -IsolationMode Vlan ` -AllowUntaggedTraffic $true ` -DefaultIsolationID $mgmtVlanId } # In case of DHCP scenario, the new adapter might not get the IP address immediately # Wait for some time (15 seconds) to make sure the new IP is settled correctly. [System.Boolean] $currentIPReady = $false $ipStopWatch = [System.diagnostics.stopwatch]::StartNew() while (-not $currentIPReady -and ($ipStopWatch.Elapsed.TotalSeconds -lt 60)) { # If the vNIC has Manual or Dhcp IPv4 address with "Preferred" state, we consider it as "ready" $ipConfig = Get-NetIPAddress -InterfaceAlias $expectedMgmtVNicName | Where-Object { ($_.PrefixOrigin -eq "Manual" -or $_.PrefixOrigin -eq "Dhcp") -and $_.AddressFamily -eq "IPv4" -and $_.AddressState -eq "Preferred" } if ($ipConfig) { $currentIPReady = $true $retVal.IPReady = $true break } else { Start-Sleep -Seconds 3 } } if (-not $currentIPReady) { # should not get into here, but keep it here for safety Log-info "Cannot get the IP address bind to the vNIC after VMSwitch created. Please check the system manually." } else { Log-Info "VMSwitch created successfully. VMSwitch: $($expectedVMSwitchName), MgmtVNic: $($expectedMgmtVNicName)" } } return $retVal } function New-PsSessionWithRetriesInternal { param ( [System.String] $Node, [PSCredential] $Credential, [System.Int16] $Retries = 3, [System.Int16] $WaitSeconds = 10 ) for ($i=1; $i -le $Retries; $i++) { try { Trace-Execution "Creating PsSession ($i/$Retries) to $Node as $($Credential.UserName)..." 
                $psSessionCreated = Microsoft.PowerShell.Core\New-PSSession -ComputerName $Node -Credential $Credential -ErrorAction Stop
                $computerNameFromSession = Microsoft.PowerShell.Core\Invoke-Command -Session $psSessionCreated -ScriptBlock { $ENV:COMPUTERNAME } -ErrorAction Stop
                $isAdminSession = Microsoft.PowerShell.Core\Invoke-Command -Session $psSessionCreated -ScriptBlock {
                    ([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] 'Administrator')
                } -ErrorAction Stop
                if (-not $isAdminSession) {
                    throw ("PsSession was successful but user: {0} is not an administrator on computer {1} " -f $psSessionCreated.Runspace.ConnectionInfo.Credential.Username, $computerNameFromSession)
                }
                break
            }
            catch {
                Trace-Execution "Creating PsSession ($i/$Retries) to $Node failed: $($_.exception.message)"
                $errMsg = $_.ToString()
                Start-Sleep -Seconds $WaitSeconds
            }
        }
        if ($psSessionCreated -and $computerNameFromSession -and $isAdminSession) {
            Trace-Execution ("PsSession to {0} created after {1} retries. (Remote machine name: {2})" -f $Node, ("$i/$Retries"), $computerNameFromSession)
            return $psSessionCreated
        }
        else {
            throw "Unable to create a valid session to $Node`: $errMsg"
        }
}
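<#
 Usage sketch for New-PsSessionWithRetriesInternal (illustrative only; the node name and credential
 below are hypothetical):

    $cred    = Get-Credential -Message 'Administrator credential for the new node'
    $session = New-PsSessionWithRetriesInternal -Node 'NODE02' -Credential $cred -Retries 3 -WaitSeconds 10
    Invoke-Command -Session $session -ScriptBlock { $env:COMPUTERNAME }

 The helper retries New-PSSession, verifies the remote computer name can be read over the session, and
 confirms that the supplied credential is a local administrator on the target before returning the session.
#>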