# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class for Port classes.
The Port classes encapsulate Port-specific (platform-specific) behavior
in the web test infrastructure.
"""
import collections
import json
import logging
import optparse
import os
import re
import sys
import tempfile
import time
from collections import defaultdict
import six
from six.moves import zip_longest
from blinkpy.common import exit_codes
from blinkpy.common import find_files
from blinkpy.common import read_checksum_from_png
from blinkpy.common import path_finder
from blinkpy.common.memoized import memoized
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.path import abspath_to_uri
from blinkpy.w3c.wpt_manifest import WPTManifest, MANIFEST_NAME
from blinkpy.web_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from blinkpy.web_tests.models.test_configuration import TestConfiguration
from blinkpy.web_tests.models.test_run_results import TestRunException
from blinkpy.web_tests.models.typ_types import TestExpectations, ResultType
from blinkpy.web_tests.port import driver
from blinkpy.web_tests.port import server_process
from blinkpy.web_tests.port.factory import PortFactory
from blinkpy.web_tests.servers import apache_http
from blinkpy.web_tests.servers import pywebsocket
from blinkpy.web_tests.servers import wptserve
from blinkpy.web_tests.skia_gold import blink_skia_gold_properties as sgp
from blinkpy.web_tests.skia_gold import blink_skia_gold_session_manager as sgsm
_log = logging.getLogger(__name__)
# Path relative to the build directory.
CONTENT_SHELL_FONTS_DIR = "test_fonts"
FONT_FILES = [
[[CONTENT_SHELL_FONTS_DIR], 'Ahem.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Arimo-Bold.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Arimo-BoldItalic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Arimo-Italic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Arimo-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Cousine-Bold.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Cousine-BoldItalic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Cousine-Italic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Cousine-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'DejaVuSans.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'GardinerModBug.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'GardinerModCat.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Garuda.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Bold.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Gelasio-BoldItalic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Italic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Lohit-Devanagari.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Lohit-Gurmukhi.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Lohit-Tamil.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'MuktiNarrow.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoColorEmoji.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansCJKjp-Regular.otf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansKhmer-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansSymbols2-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'NotoSansTibetan-Regular.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-Bold.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-BoldItalic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-Italic.ttf', None],
[[CONTENT_SHELL_FONTS_DIR], 'Tinos-Regular.ttf', None],
]
# This is the fingerprint of wpt's certificate found in
# blinkpy/third_party/wpt/certs. The following line is updated by
# update_cert.py.
WPT_FINGERPRINT = 'Nxvaj3+bY3oVrTc+Jp7m3E3sB1n3lXtnMDCyBsqEXiY='
# One for 127.0.0.1.sxg.pem
SXG_FINGERPRINT = '55qC1nKu2A88ESbFmk5sTPQS/ScG+8DD7P+2bgFA9iM='
# And one for external/wpt/signed-exchange/resources/127.0.0.1.sxg.pem
SXG_WPT_FINGERPRINT = '0Rt4mT6SJXojEMHTnKnlJ/hBKMBcI4kteBlhR1eTTdk='
# A conservative rule for names that are valid as file or directory names.
VALID_FILE_NAME_REGEX = re.compile(r'^[\w\-=]+$')
# This subdirectory will be inside the results directory and will
# contain all the disk artifacts created by web tests.
ARTIFACTS_SUB_DIR = 'layout-test-results'
class Port(object):
"""Abstract class for Port-specific hooks for the web_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'mac', 'win', 'gtk'; there is one unique
# value per class.
# FIXME: Rename this to avoid confusion with the "full port name".
port_name = None
# Test paths use forward slash as separator on all platforms.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
CONTENT_SHELL_NAME = 'content_shell'
# Update the first line in third_party/blink/web_tests/TestExpectations and
# the documentation in docs/testing/web_test_expectations.md when this list
# changes.
ALL_SYSTEMS = (
('mac10.12', 'x86'),
('mac10.13', 'x86'),
('mac10.14', 'x86'),
('mac10.15', 'x86'),
('mac11', 'x86'),
('mac11-arm64', 'arm64'),
('win7', 'x86'),
('win10.20h2', 'x86'),
('trusty', 'x86_64'),
('fuchsia', 'x86_64'),
)
CONFIGURATION_SPECIFIER_MACROS = {
'mac': [
'mac10.12', 'mac10.13', 'mac10.14', 'mac10.15', 'mac11',
'mac11-arm64'
],
'win': ['win7', 'win10.20h2'],
'linux': ['trusty'],
'fuchsia': ['fuchsia'],
}
# List of ports open on the host that the tests will connect to. When tests
# run on a separate machine (Android and Fuchsia) these ports need to be
# forwarded back to the host.
# 8000, 8080 and 8443 are for http/https tests;
# 8880 is for websocket tests (see apache_http.py and pywebsocket.py).
# 8001, 8081, 8444, and 8445 are for http/https WPT;
# 9001 and 9444 are for websocket WPT (see wptserve.py).
SERVER_PORTS = [8000, 8001, 8080, 8081, 8443, 8444, 8445, 8880, 9001, 9444]
FALLBACK_PATHS = {}
SUPPORTED_VERSIONS = []
# URL to the build requirements page.
BUILD_REQUIREMENTS_URL = ''
# The suffixes of baseline files (not extensions).
BASELINE_SUFFIX = '-expected'
BASELINE_MISMATCH_SUFFIX = '-expected-mismatch'
# All of the non-reftest baseline extensions we use.
BASELINE_EXTENSIONS = ('.wav', '.txt', '.png')
FLAG_EXPECTATIONS_PREFIX = 'FlagExpectations'
# The following is used for concatenating WebDriver test names.
WEBDRIVER_SUBTEST_SEPARATOR = '>>'
# The following is used for concatenating WebDriver test names in pytest format.
WEBDRIVER_SUBTEST_PYTEST_SEPARATOR = '::'
# The following two constants must match. When adding a new WPT root, also
# remember to add an alias rule to //third_party/wpt_tools/wpt.config.json.
# WPT_DIRS maps WPT roots on the file system to URL prefixes on wptserve.
# The order matters: '/' MUST be the last URL prefix.
WPT_DIRS = collections.OrderedDict([
('wpt_internal', '/wpt_internal/'),
('external/wpt', '/'),
])
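# For illustration: under this mapping, external/wpt/dom/nodes/Node.html is
# served at /dom/nodes/Node.html, while wpt_internal/foo.html is served at
# /wpt_internal/foo.html (paths hypothetical).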
# WPT_REGEX captures: 1. the root directory of WPT relative to web_tests
# (without a trailing slash), 2. the path of the test within WPT (without a
# leading slash).
WPT_REGEX = re.compile(
r'^(?:virtual/[^/]+/)?(external/wpt|wpt_internal)/(.*)$')
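# For illustration: 'virtual/suite/external/wpt/dom/a.html' matches with
# group(1) == 'external/wpt' and group(2) == 'dom/a.html'; a non-WPT path
# such as 'fast/css/a.html' does not match (names hypothetical).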
# Because this is an abstract base class, arguments to functions may be
# unused in this class - pylint: disable=unused-argument
@classmethod
def latest_platform_fallback_path(cls):
return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
@classmethod
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
assert port_name.startswith(cls.port_name)
return port_name
def __init__(self, host, port_name, options=None, **kwargs):
# This value is the "full port name", and may be different from
# cls.port_name by having version modifiers appended to it.
self._name = port_name
# These are default values that should be overridden in subclasses.
self._version = ''
self._architecture = 'x86'
# FIXME: Ideally we'd have a package-wide way to get a well-formed
# options object that had all of the necessary options defined on it.
self._options = options or optparse.Values()
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
self._path_finder = path_finder.PathFinder(host.filesystem)
self._http_server = None
self._websocket_server = None
self._wpt_server = None
self._image_differ = None
self.server_process_constructor = server_process.ServerProcess # This can be overridden for testing.
self._http_lock = None # FIXME: Why does this live on the port object?
self._dump_reader = None
if not hasattr(options, 'configuration') or not options.configuration:
self.set_option_default('configuration',
self.default_configuration())
if not hasattr(options, 'target') or not options.target:
self.set_option_default('target', self._options.configuration)
if not hasattr(options, 'no_virtual_tests'):
self.set_option_default('no_virtual_tests', False)
self._test_configuration = None
self._results_directory = None
self._virtual_test_suites = None
self._used_expectation_files = None
self._skia_gold_temp_dir = None
self._skia_gold_session_manager = None
self._skia_gold_properties = None
def __del__(self):
if self._skia_gold_temp_dir:
self._filesystem.rmtree(self._skia_gold_temp_dir,
ignore_errors=True)
def __str__(self):
return 'Port{name=%s, version=%s, architecture=%s, test_configuration=%s}' % (
self._name, self._version, self._architecture,
self._test_configuration)
def get_platform_tags(self):
"""Returns system condition tags that are used to find active expectations
for a test run on a specific system"""
return frozenset([
self._options.configuration.lower(), self._version, self.port_name,
self._architecture
])
@memoized
def flag_specific_config_name(self):
"""Returns the name of the flag-specific configuration which best matches
self._specified_additional_driver_flags(), or the first specified flag
with leading '-'s stripped if no match in the configuration is found.
"""
specified_flags = self._specified_additional_driver_flags()
if not specified_flags:
return None
best_match = None
configs = self._flag_specific_configs()
for name in configs:
# To match, the specified flags must start with all of the config's args.
args = configs[name]
if specified_flags[:len(args)] != args:
continue
# The first config matching the highest number of specified flags wins.
if not best_match or len(configs[best_match]) < len(args):
best_match = name
if best_match:
return best_match
# If no match, fall back to the old mode: use the name of the first specified flag.
return specified_flags[0].lstrip('-')
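# For illustration (hypothetical config): with configs
# {'highdpi': ['--force-device-scale-factor=1.5']} and specified flags
# ['--force-device-scale-factor=1.5', '--foo'], the prefix match above
# selects 'highdpi'; with only ['--foo'] specified, nothing matches and
# the method falls back to the name 'foo'.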
@memoized
def _flag_specific_configs(self):
"""Reads configuration from FlagSpecificConfig and returns a dictionary from name to args."""
config_file = self._filesystem.join(self.web_tests_dir(),
'FlagSpecificConfig')
if not self._filesystem.exists(config_file):
return {}
try:
json_configs = json.loads(
self._filesystem.read_text_file(config_file))
except ValueError as error:
raise ValueError('{} is not a valid JSON file: {}'.format(
config_file, error))
configs = {}
for config in json_configs:
name = config['name']
args = config['args']
if not VALID_FILE_NAME_REGEX.match(name):
raise ValueError(
'{}: name "{}" contains invalid characters'.format(
config_file, name))
if name in configs:
raise ValueError('{} contains duplicated name {}.'.format(
config_file, name))
if args in configs.values():
raise ValueError(
'{}: name "{}" has the same args as another entry.'.format(
config_file, name))
configs[name] = args
return configs
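# A FlagSpecificConfig file is a JSON array of {"name", "args"} objects,
# for example (hypothetical entry):
# [{"name": "highdpi", "args": ["--force-device-scale-factor=1.5"]}]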
def _specified_additional_driver_flags(self):
"""Returns the list of additional driver flags specified by the user in
the following ways, concatenated:
1. Flags in web_tests/additional-driver-flag.setting.
2. Flags expanded from --flag-specific=<name> based on the flag-specific config.
3. Zero or more flags passed by --additional-driver-flag.
"""
flags = []
flag_file = self._filesystem.join(self.web_tests_dir(),
'additional-driver-flag.setting')
if self._filesystem.exists(flag_file):
flags = self._filesystem.read_text_file(flag_file).split()
flag_specific_option = self.get_option('flag_specific')
if flag_specific_option:
configs = self._flag_specific_configs()
assert flag_specific_option in configs, '{} is not defined in FlagSpecificConfig'.format(
flag_specific_option)
flags += configs[flag_specific_option]
flags += self.get_option('additional_driver_flag', [])
return flags
def additional_driver_flags(self):
flags = self._specified_additional_driver_flags()
if self.driver_name() == self.CONTENT_SHELL_NAME:
flags += [
'--run-web-tests',
'--ignore-certificate-errors-spki-list=' + WPT_FINGERPRINT +
',' + SXG_FINGERPRINT + ',' + SXG_WPT_FINGERPRINT,
# Required for WebTransport tests.
'--origin-to-force-quic-on=web-platform.test:11000',
'--user-data-dir'
]
if self.get_option('nocheck_sys_deps', False):
flags.append('--disable-system-font-check')
# If we're already repeating the tests more than once, then we're not
# particularly concerned with speed. Resetting the shell between tests
# increases test run time by 2-5X, but provides more consistent results
# (fewer state leaks between tests).
if (self.get_option('reset_shell_between_tests')
or (self.get_option('repeat_each')
and self.get_option('repeat_each') > 1)
or (self.get_option('iterations')
and self.get_option('iterations') > 1)):
flags += ['--reset-shell-between-tests']
return flags
def supports_per_test_timeout(self):
return False
def default_smoke_test_only(self):
return False
def _default_timeout_ms(self):
return 6000
def timeout_ms(self):
timeout_ms = self._default_timeout_ms()
if self.get_option('configuration') == 'Debug':
# Debug is about 5x slower than Release.
return 5 * timeout_ms
if self._build_has_dcheck_always_on():
# Release with DCHECK is also slower than pure Release.
return 2 * timeout_ms
return timeout_ms
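# With the 6000 ms default, this works out to 30000 ms for Debug builds
# and 12000 ms for Release builds with dcheck_always_on enabled.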
def skia_gold_temp_dir(self):
return self._skia_gold_temp_dir
def skia_gold_properties(self):
if not self._skia_gold_properties:
self._skia_gold_properties = sgp.BlinkSkiaGoldProperties(
self._options)
return self._skia_gold_properties
def skia_gold_session_manager(self):
if not self._skia_gold_session_manager:
self._skia_gold_temp_dir = self._filesystem.mkdtemp()
self._skia_gold_session_manager = sgsm.BlinkSkiaGoldSessionManager(
str(self._skia_gold_temp_dir), self.skia_gold_properties())
return self._skia_gold_session_manager
def skia_gold_json_keys(self):
return {
'configuration': self._options.configuration.lower(),
'version': self._version,
'port': self.port_name,
'architecture': self._architecture,
'ignore': '1',
}
@memoized
def _build_has_dcheck_always_on(self):
args_gn_file = self._build_path('args.gn')
if not self._filesystem.exists(args_gn_file):
_log.error('Unable to find %s', args_gn_file)
return False
contents = self._filesystem.read_text_file(args_gn_file)
return bool(
re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$', contents,
re.MULTILINE))
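# For illustration, an args.gn line such as
#   dcheck_always_on = true  # enabled on bots
# matches the pattern above, while 'dcheck_always_on = false' does not.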
def driver_stop_timeout(self):
"""Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we
# want to be slow on cleanup as well (for things like ASAN, Valgrind, etc.)
return (3.0 * float(self.get_option('timeout_ms', '0')) /
self._default_timeout_ms())
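# For example, when the timeout_ms option equals _default_timeout_ms()
# this returns 3.0 seconds; a doubled timeout scales it to 6.0 seconds.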
def default_batch_size(self):
"""Returns the default batch size to use for this port."""
if self.get_option('enable_sanitizer'):
# ASAN/MSAN/TSAN use more memory than regular content_shell. Their
# memory usage may also grow over time, up to a certain point.
# Relaunching the driver periodically helps keep it under control.
return 40
# The default is infinite batch size.
return 0
def default_child_processes(self):
"""Returns the number of child processes to use for this port."""
return self._executive.cpu_count()
def default_max_locked_shards(self):
"""Returns the number of "locked" shards to run in parallel (like the http tests)."""
max_locked_shards = int(self.default_child_processes()) // 4
if not max_locked_shards:
return 1
return max_locked_shards
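# e.g. 16 child processes -> 4 locked shards; fewer than 4 -> 1.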
def baseline_version_dir(self):
"""Returns the absolute path to the platform-and-version-specific results."""
baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
def baseline_flag_specific_dir(self):
"""If --additional-driver-flag is specified, returns the absolute path to the flag-specific
platform-independent results. Otherwise returns None."""
flag_specific_path = self._flag_specific_baseline_search_path()
return flag_specific_path[-1] if flag_specific_path else None
def baseline_search_path(self):
return (self.get_option('additional_platform_directory', []) +
self._flag_specific_baseline_search_path() +
self._compare_baseline() +
list(self.default_baseline_search_path()))
def default_baseline_search_path(self):
"""Returns a list of absolute paths to directories to search under for baselines.
The directories are searched in order.
"""
return map(self._absolute_baseline_path,
self.FALLBACK_PATHS[self.version()])
@memoized
def _compare_baseline(self):
factory = PortFactory(self.host)
target_port = self.get_option('compare_port')
if target_port:
return factory.get(target_port).default_baseline_search_path()
return []
def _check_file_exists(self,
path_to_file,
file_description,
override_step=None,
more_logging=True):
"""Verifies that the file is present where expected, or logs an error.
Args:
path_to_file: The path of the file to check.
file_description: The (human friendly) name or description of the file
you're looking for (e.g., "HTTP Server"). Used for error logging.
override_step: An optional string to be logged if the check fails.
more_logging: Whether or not to log the error messages.
Returns:
True if the file exists, else False.
"""
if not self._filesystem.exists(path_to_file):
if more_logging:
_log.error('Unable to find %s', file_description)
_log.error(' at %s', path_to_file)
if override_step:
_log.error(' %s', override_step)
_log.error('')
return False
return True
def check_build(self, needs_http, printer):
if not self._check_file_exists(self._path_to_driver(), 'test driver'):
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
if not self._check_driver_build_up_to_date(
self.get_option('configuration')):
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
if not self._check_file_exists(self._path_to_image_diff(),
'image_diff'):
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
if self._dump_reader and not self._dump_reader.check_is_functional():
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
if needs_http and not self.check_httpd():
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
return exit_codes.OK_EXIT_STATUS
def check_sys_deps(self):
"""Checks whether the system is properly configured.
Most checks happen during invocation of the driver prior to running
tests. This can be overridden to run custom checks.
Returns:
An exit status code.
"""
return exit_codes.OK_EXIT_STATUS
def check_httpd(self):
httpd_path = self.path_to_apache()
if httpd_path:
try:
env = self.setup_environ_for_server()
if self._executive.run_command(
[httpd_path, '-v'], env=env, return_exit_code=True) != 0:
_log.error('httpd seems broken. Cannot run http tests.')
return False
return True
except OSError as e:
_log.error('while trying to run: ' + httpd_path)
_log.error('httpd launch error: ' + repr(e))
_log.error('No httpd found. Cannot run http tests.')
return False
def do_text_results_differ(self, expected_text, actual_text):
return expected_text != actual_text
def do_audio_results_differ(self, expected_audio, actual_audio):
return expected_audio != actual_audio
def diff_image(self,
expected_contents,
actual_contents,
max_channel_diff=None,
max_pixels_diff=None):
"""Compares two images and returns an (image diff, error string) pair.
If an error occurs (like image_diff isn't found, or crashes), we log an
error and return an error string as the second member of the pair.
"""
# If only one of them exists, return that one.
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents:
return (expected_contents, None)
if not expected_contents:
return (actual_contents, None)
tempdir = self._filesystem.mkdtemp()
expected_filename = self._filesystem.join(str(tempdir), 'expected.png')
self._filesystem.write_binary_file(expected_filename,
expected_contents)
actual_filename = self._filesystem.join(str(tempdir), 'actual.png')
self._filesystem.write_binary_file(actual_filename, actual_contents)
diff_filename = self._filesystem.join(str(tempdir), 'diff.png')
executable = self._path_to_image_diff()
# Although we are handed 'old', 'new', image_diff wants 'new', 'old'.
command = [
executable, '--diff', actual_filename, expected_filename,
diff_filename
]
# Tells image_diff to allow a tolerance when calculating the pixel
# diff, to account for variance when the tests are run on an actual
# GPU.
if self.get_option('fuzzy_diff'):
command.append('--fuzzy-diff')
# The max_channel_diff and max_pixels_diff arguments are used by WPT
# tests for fuzzy reftests. See
# https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
if max_channel_diff is not None:
command.append('--fuzzy-max-channel-diff={}'.format('-'.join(
map(str, max_channel_diff))))
if max_pixels_diff is not None:
command.append('--fuzzy-max-pixels-diff={}'.format('-'.join(
map(str, max_pixels_diff))))
result = None
err_str = None
try:
output = self._executive.run_command(command)
# Log the output, to enable user debugging of a diff hidden by fuzzy
# expectations. This is useful when tightening fuzzy bounds.
if output:
_log.debug(output)
except ScriptError as error:
if error.exit_code == 1:
result = self._filesystem.read_binary_file(diff_filename)
# Log the output, to enable user debugging of the diff.
if error.output:
_log.debug(error.output)
else:
err_str = 'Image diff returned an exit code of %s. See http://crbug.com/278596' % error.exit_code
except OSError as error:
err_str = 'error running image diff: %s' % error
finally:
self._filesystem.rmtree(str(tempdir))
return (result, err_str or None)
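# image_diff's exit code convention, as handled above: 0 means the images
# match (no diff is returned), 1 means they differ (the written diff.png
# is returned), and any other code is reported as an error.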
def driver_name(self):
if self.get_option('driver_name'):
return self.get_option('driver_name')
return self.CONTENT_SHELL_NAME
def expected_baselines_by_extension(self, test_name):
"""Returns a dict mapping baseline suffix to relative path for each baseline in a test.
For reftests, it returns ".==" or ".!=" instead of the suffix.
"""
# FIXME: The name similarity between this and expected_baselines()
# below, is unfortunate. We should probably rename them both.
baseline_dict = {}
reference_files = self.reference_files(test_name)
if reference_files:
# FIXME: How should this handle more than one type of reftest?
baseline_dict['.' + reference_files[0][0]] = \
self.relative_test_filename(reference_files[0][1])
for extension in self.BASELINE_EXTENSIONS:
path = self.expected_filename(
test_name, extension, return_default=False)
baseline_dict[extension] = self.relative_test_filename(
path) if path else path
return baseline_dict
def output_filename(self, test_name, suffix, extension):
"""Generates the output filename for a test.
This method gives a proper filename for various outputs of a test,
including baselines and actual results. Usually, the output filename
follows the pattern: test_name_without_ext+suffix+extension, but when
the test name contains query strings, e.g. external/wpt/foo.html?wss,
test_name_without_ext is mangled to be external/wpt/foo_wss.
Use this method instead of writing another mangling.
Args:
test_name: The name of a test.
suffix: A suffix string to add before the extension
(e.g. "-expected").
extension: The extension of the output file (starting with .).
Returns:
A string, the output filename.
"""
# WPT names might contain query strings, e.g. external/wpt/foo.html?wss,
# in which case we mangle test_name_root (the part of a path before the
# last extension point) to external/wpt/foo_wss, and the output filename
# becomes external/wpt/foo_wss-expected.txt.
index = test_name.find('?')
if index != -1:
test_name_root, _ = self._filesystem.splitext(test_name[:index])
query_part = test_name[index:]
test_name_root += self._filesystem.sanitize_filename(query_part)
else:
test_name_root, _ = self._filesystem.splitext(test_name)
return test_name_root + suffix + extension
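# For illustration:
#   output_filename('external/wpt/foo.html?wss', '-expected', '.txt')
# returns 'external/wpt/foo_wss-expected.txt', assuming sanitize_filename
# maps '?' to '_' as described above.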
def expected_baselines(self,
test_name,
extension,
all_baselines=False,
match=True):
"""Given a test name, finds where the baseline results are located.
Return values will be in the format appropriate for the current
platform (e.g., "\\" for path separators on Windows). If the results
file is not found, then None will be returned for the directory,
but the expected relative pathname will still be returned.
This routine is generic but lives here since it is used in
conjunction with the other baseline and filename routines that are
platform specific.
Args:
test_name: Name of test file (usually a relative path under web_tests/)
extension: File extension of the expected results, including dot;
e.g. '.txt' or '.png'. This should not be None, but may be an
empty string.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first one.
match: Whether the baseline is a match or a mismatch.
Returns:
A list of (platform_dir, results_filename) pairs, where
platform_dir - abs path to the top of the results tree (or test
tree)
results_filename - relative path from top of tree to the results
file
(port.join() of the two gives you the full path to the file,
unless None was returned.)
"""
baseline_filename = self.output_filename(
test_name,
self.BASELINE_SUFFIX if match else self.BASELINE_MISMATCH_SUFFIX,
extension)
baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
if self._filesystem.exists(
self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected
# result in the test directory, even if no such file actually exists.
platform_dir = self.web_tests_dir()
if self._filesystem.exists(
self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if baselines:
return baselines
return [(None, baseline_filename)]
def expected_filename(self,
test_name,
extension,
return_default=True,
fallback_base_for_virtual=True,
match=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
the directory in which the test itself is located will be returned.
The return value is in the format appropriate for the platform
(e.g., "\\" for path separators on windows).
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
Args:
test_name: Name of test file (usually a relative path under web_tests/)
extension: File extension of the expected results, including dot;
e.g. '.txt' or '.png'. This should not be None, but may be an
empty string.
return_default: If True, returns the path to the generic expectation
if nothing else is found; if False, returns None.
fallback_base_for_virtual: For virtual tests only. When no virtual-
specific baseline is found, if this parameter is True, fall back
to find baselines of the base test; if False, depending on
|return_default|, returns the generic virtual baseline or None.
match: Whether the baseline is a match or a mismatch.
Returns:
An absolute path to its expected results, or None if not found.
"""
# The [0] means the first expected baseline (which is the one to be
# used) in the fallback paths.
platform_dir, baseline_filename = self.expected_baselines(
test_name, extension, match=match)[0]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
if fallback_base_for_virtual:
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
return self.expected_filename(
actual_test_name, extension, return_default, match=match)
if return_default:
return self._filesystem.join(self.web_tests_dir(),
baseline_filename)
return None
def fallback_expected_filename(self, test_name, extension):
"""Given a test name, returns an absolute path to its next fallback baseline.
Args:
same as expected_filename()
Returns:
An absolute path to the next fallback baseline, or None if not found.
"""
baselines = self.expected_baselines(
test_name, extension, all_baselines=True)
if len(baselines) < 2:
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
if len(baselines) == 0:
return self.fallback_expected_filename(
actual_test_name, extension)
# In this case, baselines[0] is the current baseline of the
# virtual test, so the first base test baseline is the fallback
# baseline of the virtual test.
return self.expected_filename(
actual_test_name, extension, return_default=False)
return None
platform_dir, baseline_filename = baselines[1]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
return None
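# For illustration: for a virtual test with a single virtual-specific
# baseline, the base test's first baseline (if any) is the fallback; for
# a non-virtual test with two or more baselines on the search path, the
# second one is returned.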
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce,
or None if it is a text-only test.
"""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(
png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'.
"""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace(b'\r\n', b'\n')
def expected_subtest_failure(self, test_name):
baseline = self.expected_text(test_name)
if baseline:
baseline = baseline.decode('utf8', 'replace')
if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", baseline, re.MULTILINE):
return True
return False
def expected_harness_error(self, test_name):
baseline = self.expected_text(test_name)
if baseline:
baseline = baseline.decode('utf8', 'replace')
if re.search(r"^Harness Error\.", baseline, re.MULTILINE):
return True
return False
def reference_files(self, test_name):
"""Returns a list of expectation (== or !=) and filename pairs"""
# Try to find -expected.* or -expected-mismatch.* in the same directory.
reftest_list = []
for expectation in ('==', '!='):
for extension in Port.supported_file_extensions:
path = self.expected_filename(
test_name, extension, match=(expectation == '=='))
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
if reftest_list:
return reftest_list
# Try to extract information from MANIFEST.json.
match = self.WPT_REGEX.match(test_name)
if not match:
return []
wpt_path = match.group(1)
path_in_wpt = match.group(2)
for expectation, ref_path_in_wpt in self.wpt_manifest(
wpt_path).extract_reference_list(path_in_wpt):
ref_absolute_path = self._filesystem.join(
self.web_tests_dir(), wpt_path + ref_path_in_wpt)
reftest_list.append((expectation, ref_absolute_path))
return reftest_list
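# The returned pairs look like ('==', '/abs/path/foo-expected.html') or
# ('!=', '/abs/path/foo-expected-mismatch.html') (paths illustrative).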
def tests(self, paths=None):
"""Returns all tests or tests matching supplied paths.
Args:
paths: Array of paths to match. If supplied, this function will only
return tests matching at least one path in paths.
Returns:
An array of test paths and test names. The latter are web platform
tests that don't correspond to file paths but are valid tests,
for instance a file path test.any.js could correspond to two test
names: test.any.html and test.any.worker.html.
"""
tests = self.real_tests(paths)
if paths:
if not self._options.no_virtual_tests:
tests.extend(self._virtual_tests_matching_paths(paths))
if (any(wpt_path in path for wpt_path in self.WPT_DIRS
for path in paths)
# TODO(robertma): Remove this special case when external/wpt is moved to wpt.
or any('external' in path for path in paths)):
tests.extend(self._wpt_test_urls_matching_paths(paths))
else:
# '/' is used instead of filesystem.sep as the WPT manifest always
# uses '/' for paths (it is not OS dependent).
wpt_tests = [
wpt_path + '/' + test for wpt_path in self.WPT_DIRS
for test in self.wpt_manifest(wpt_path).all_urls()
]
tests_by_dir = defaultdict(list)
for test in tests + wpt_tests:
dirname = os.path.dirname(test) + '/'
tests_by_dir[dirname].append(test)
if not self._options.no_virtual_tests:
tests.extend(self._all_virtual_tests(tests_by_dir))
tests.extend(wpt_tests)
return tests
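# e.g. a single file external/wpt/foo.any.js can expand to the test names
# external/wpt/foo.any.html and external/wpt/foo.any.worker.html, per the
# docstring above (file name hypothetical).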
def real_tests_from_dict(self, paths, tests_by_dir):
"""Find all real tests in paths, using results saved in dict."""
files = []
for path in paths:
if self._has_supported_extension_for_all(path):
files.append(path)
continue
path = path + '/' if path[-1] != '/' else path
for key, value in tests_by_dir.items():
if key.startswith(path):
files.extend(value)
return files
def real_tests(self, paths):
"""Find all real tests in paths except WPT."""
# When collecting test cases, skip these directories.
skipped_directories = set([