Such tasks obviously cannot have any test unit failures, so take that
into account when checking the results.
Signed-off-by: Francois Gouget <fgouget@codeweavers.com>
---
testbot/tests/TestWTBS | 74 +++++++++++++++++++++++++++---------------
1 file changed, 48 insertions(+), 26 deletions(-)
diff --git a/testbot/tests/TestWTBS b/testbot/tests/TestWTBS
index 1467c4b4e..edf097de2 100755
--- a/testbot/tests/TestWTBS
+++ b/testbot/tests/TestWTBS
@@ -859,18 +859,6 @@ sub CheckTask($$$$)
 {
   my ($Task, $TaskType, $TestInfo, $TestUnits) = @_;
 
-  my $TaskInfo = $TestInfo->{$TaskType};
-  if (CheckValue($TaskInfo->{Status}))
-  {
-    is($Task->Status, $TaskInfo->{Status}, "Check Status of task ". TaskKeyStr($Task));
-  }
-  if ($Task->Status =~ /^bad/)
-  {
-    # It makes no sense to check which test units were run in case of a build
-    # error.
-    $TestUnits->{$TaskType}->{"*skipped*"} = 1;
-  }
-
   # Assume the VM's Missions field has not changed since the tests were run
   my ($ErrMessage, $Missions) = ParseMissionStatement($Task->Missions);
   if (@$Missions != 1)
@@ -879,22 +867,42 @@ sub CheckTask($$$$)
     return;
   }
 
-  my %ReportTypes;
+  my (%ReportTypes, $IsTestTask);
   if ($TaskType =~ /^win/)
   {
+    $IsTestTask = 1 if ($TaskType =~ /^win(?:32|64)$/);
     foreach my $Mission (@{$Missions->[0]->{Missions}})
     {
       my $ReportName = GetMissionBaseName($Mission) .".report";
       my $MissionType = $TaskType;
-      $MissionType .= ":". ($Mission->{test} || "test") if ($TaskType eq "wine");
+      if ($TaskType eq "wine")
+      {
+        my $Test = $Mission->{test} || "test";
+        $IsTestTask = 1 if ($Test =~ /^(?:test|module)/);
+        $MissionType .= ":$Test";
+      }
       $ReportTypes{$ReportName} = $MissionType;
     }
   }
 
+  my $TestType = ($TaskType eq "wine" and !$IsTestTask) ?
+                 "wine:build" : $TaskType;
+  my $TaskInfo = $TestInfo->{$TestType};
+
+  if (CheckValue($TaskInfo->{Status}))
+  {
+    is($Task->Status, $TaskInfo->{Status}, "Check Status of task ". TaskKeyStr($Task));
+  }
+  if ($Task->Status =~ /^bad/)
+  {
+    # It makes no sense to check which test units were run in case of a build
+    # error.
+    $TestUnits->{$TaskType}->{"*skipped*"} = 1;
+  }
   my $CheckTimeouts = ($Task->Status eq "completed" and
                        CheckValue($TaskInfo->{HasTimeout}));
 
-  my $ExpectedFailures;
+  my ($ReportFailures, $IgnoreFailures);
   my ($ReportCount, $TimeoutCount, $NewFailures) = (0, 0, 0);
   foreach my $LogName (@{GetLogFileNames($Task->GetDir())})
   {
@@ -903,7 +911,7 @@ sub CheckTask($$$$)
     $NewFailures += $LogInfo->{NewCount} || 0;
 
     # Get the mission-specific "wine:xxx" report directives
-    my $MissionType = $ReportTypes{$LogName} || $TaskType;
+    my $MissionType = $ReportTypes{$LogName} || $TestType;
     my $MissionInfo = $TestInfo->{$MissionType};
     my $LogType = "log";
     if ($LogName =~ /\.report$/)
@@ -924,11 +932,11 @@ sub CheckTask($$$$)
         ok(($LogInfo->{ErrCount} || 0) <= $MissionInfo->{TestFailures},
            "Check Failures of $LogName in task ". TaskKeyStr($Task))
           or diag("report error count = ", ($LogInfo->{ErrCount} || 0), ", expected at most $MissionInfo->{TestFailures}");
-        $ExpectedFailures += $MissionInfo->{TestFailures};
+        $ReportFailures += $MissionInfo->{TestFailures};
       }
       else
       {
-        $ExpectedFailures = undef;
+        $IgnoreFailures = 1;
       }
     }
     elsif ($LogName =~ /^testbot\./)
@@ -951,20 +959,34 @@ sub CheckTask($$$$)
   }
   if ($Task->Status eq "completed")
   {
-    if (defined $ExpectedFailures)
+    if ($IsTestTask and ($ReportCount or CheckValue($TaskInfo->{TestUnits})))
+    {
+      if ($IgnoreFailures)
+      {
+        # For some mission types (typically test=module) the failure count
+        # is unpredictable and cannot be checked.
+        ; # Nothing to do
+      }
+      elsif ($ReportCount)
+      {
+        # The task's failure count must be compared to the sum of the failure
+        # counts of each test report.
+        is($Task->TestFailures, $ReportFailures, "Check Failures of task ". TaskKeyStr($Task));
+      }
+      elsif (CheckValue($TaskInfo->{TestFailures}))
+      {
+        # There is no report if the VM crashed / rebooted
+        is($Task->TestFailures, $TaskInfo->{TestFailures}, "Check Failures of task ". TaskKeyStr($Task));
+      }
+    }
+    elsif (!$IsTestTask and $ReportCount)
     {
-      is($Task->TestFailures, $ExpectedFailures, "Check Failures of task ". TaskKeyStr($Task));
+      fail("A $TestType task should not have $ReportCount test reports");
     }
     elsif (CheckValue($TaskInfo->{TestFailures}) and !$ReportCount)
     {
-      # Scale the expected TestFailures count with the number of times the test
-      # was run, i.e. $ReportCount, or take it as is if no report is available.
       is($Task->TestFailures, $TaskInfo->{TestFailures}, "Check Failures of task ". TaskKeyStr($Task));
     }
-    # else there are reports for which we cannot check the TestFailures count.
-    # In particular this can happen if the test failure count can be checked
-    # for test=test but not for test=module. Then whether TestFailures can be
-    # checked or not depends on the missions mix.
   }
   return $NewFailures;
 }
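
To illustrate how this changes the expectations, here is a hypothetical
$TestInfo excerpt (the values are made up for this example); Status,
TestFailures, HasTimeout and TestUnits are the keys CheckTask() actually
reads:

  my $TestInfo = {
    # Test tasks: their failure counts can be checked.
    "win32"      => { Status => "completed", TestFailures => 1 },
    "wine:test"  => { Status => "completed", TestFailures => 0 },
    # A build-only wine task is now looked up under its own "wine:build"
    # key: its Status can still be checked but, as noted in the commit
    # message, no test unit failures are expected.
    "wine:build" => { Status => "completed" },
  };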
--
2.20.1