Add log when experimental network loader is used
[WebKit-https.git] / Tools / Scripts / webkitpy / tool / gcovr
1 #! /usr/bin/env python
2 #
3 # A report generator for gcov 3.4
4 #
5 # This routine generates a format that is similar to the format generated
6 # by the Python coverage.py module.  This code is similar to the
7 # data processing performed by lcov's geninfo command.  However, we
8 # don't worry about parsing the *.gcna files, and backwards compatibility for
9 # older versions of gcov is not supported.
10 #
11 # Outstanding issues
12 #   - verify that gcov 3.4 or newer is being used
13 #   - verify support for symbolic links
14 #
15 # gcovr is a FAST project.  For documentation, bug reporting, and
16 # updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
17 #
18 # _________________________________________________________________________
19 #
20 # FAST: Utilities for Agile Software Development
21 # Copyright (c) 2008 Sandia Corporation.
22 # This software is distributed under the BSD License.
23 # Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
24 # the U.S. Government retains certain rights in this software.
25 # For more information, see the FAST README.txt file.
26 #
27 # $Revision: 2839 $
28 # $Date: 2013-05-27 11:13:17 -0700 (Mon, 27 May 2013) $
29 # _________________________________________________________________________
30 #
31
32 import copy
33 import os
34 import re
35 import subprocess
36 import sys
37 import time
38 import xml.dom.minidom
39
40 from optparse import OptionParser
41 from os.path import normpath
42
__version__ = "2.5-prerelease"
# SVN keyword-expanded revision; parsed by version_str() below.
src_revision = "$Revision: 2839 $"
# Name of the gcov executable to invoke (assumed to be on the PATH).
gcov_cmd = "gcov"

# Matches gcov's "Creating 'foo.gcov'" status line, capturing the
# created filename.
output_re = re.compile("[Cc]reating [`'](.*)'$")
# Matches gcov's complaint when it cannot find the source/graph file;
# used to detect that gcov was run from the wrong working directory.
source_re = re.compile("cannot open (source|graph) file")

# Directory gcovr was started from; restored after each os.chdir().
starting_dir = os.getcwd()
52
def version_str():
    """
    Return the human-readable gcovr version string, appending the SVN
    revision number (e.g. "2.5-prerelease (r2839)") when the $Revision$
    keyword in src_revision was expanded.
    """
    ans = __version__
    # Raw string: '\$' and '\s' are regex escapes, not string escapes
    # (non-raw form triggers invalid-escape warnings on modern Python).
    m = re.match(r'\$Revision:\s*(\S+)\s*\$', src_revision)
    if m:
        ans = ans + " (r%s)" % (m.group(1))
    return ans
60 #
61 # Container object for coverage statistics
62 #
class CoverageData(object):
    """
    Accumulated coverage statistics for one source file.

    All line containers are keyed by 1-based source line number:
      uncovered              -- set of executable lines never executed
      uncovered_exceptional  -- set of lines reachable only on an
                                exceptional path (gcov "=====" marker)
      covered                -- dict: line -> execution count
      branches               -- dict: line -> {branch id -> taken count}
      noncode                -- lines gcov marked non-executable that are
                                layout noise ("{", "}", "else", comments)
      all_lines              -- union of covered and uncovered lines

    NOTE(review): uncovered_str(), coverage() and summary() read the
    module-global 'options' (show_branch, filter) rather than taking
    parameters, so this class is only usable after option parsing.
    """

    def __init__(self, fname, uncovered, uncovered_exceptional, covered, branches, noncode):
        # fname: source filename this data describes.
        self.fname=fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.uncovered_exceptional = copy.copy(uncovered_exceptional)
        self.covered   = copy.copy(covered)
        self.noncode   = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(uncovered_exceptional)
        self.all_lines.update(covered.keys())
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, uncovered_exceptional, covered, branches, noncode):
        """
        Merge another gcov run over the same source file into this one.
        Execution and branch counts are summed; a line counts as noncode
        only if every run agreed it was noncode; lines covered by any
        run are removed from the uncovered sets (order matters: the
        difference_update calls must come after the covered merge).
        """
        self.all_lines.update(uncovered)
        self.all_lines.update(uncovered_exceptional)
        self.all_lines.update(covered.keys())
        self.uncovered.update(uncovered)
        self.uncovered_exceptional.update(uncovered_exceptional)
        self.noncode.intersection_update(noncode)
        for k in covered.keys():
            self.covered[k] = self.covered.get(k,0) + covered[k]
        for k in branches.keys():
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(self.covered.keys())
        self.uncovered_exceptional.difference_update(self.covered.keys())

    def uncovered_str(self, exceptional):
        """
        Render the missed lines as a compact "a,b,c-f" string.

        In --branch mode, lists each line having at least one untaken
        branch (no range aggregation).  Otherwise renders either the
        exceptional-only or the plain uncovered set, collapsing
        consecutive lines into "first-last" ranges; gaps consisting
        entirely of noncode lines do not break a range.
        """
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in self.branches.keys():
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break

            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        if exceptional:
            tmp = list(self.uncovered_exceptional)
        else:
            tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""

        tmp.sort()
        first = None
        last = None
        ranges=[]
        for item in tmp:
            if last is None:
                first=item
                last=item
            elif item == (last+1):
                last=item
            else:
                # The gap between last and item is all noncode: treat the
                # run as contiguous and keep extending it.
                if len(self.noncode.intersection(range(last+1,item))) \
                       == item - last - 1:
                    last = item
                    continue

                if first==last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first)+"-"+str(last))
                first=item
                last=item
        if first==last:
            ranges.append(str(first))
        else:
            ranges.append(str(first)+"-"+str(last))
        return ",".join(ranges)

    def coverage(self):
        """
        Return (total, covered, percent-string).  In --branch mode these
        count branches taken/untaken; otherwise lines.  percent is an
        integer-truncated string, or "--" when total is zero.
        """
        if ( options.show_branch ):
            total = 0
            cover = 0
            for line in self.branches.keys():
                for branch in self.branches[line].keys():
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0*cover/total)) or "--"
        return (total, cover, percent)

    def summary(self):
        """
        Return (total, covered, formatted-report-row) for the text
        report.  The filename is shortened via options.filter when the
        filter matches at the start of the path; exceptional-only missed
        lines are appended in "[* ...]" form.
        """
        tmp = options.filter.sub('',self.fname)
        if not self.fname.endswith(tmp):
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp=tmp+"\n"+" "*40

        (total, cover, percent) = self.coverage()
        uncovered_lines = self.uncovered_str(False)
        if not options.show_branch:
            t = self.uncovered_str(True)
            if len(t):
                uncovered_lines += " [* " + t + "]";
        return ( total, cover,
                 tmp + str(total).rjust(8) + str(cover).rjust(8) + \
                 percent.rjust(6) + "%   " + uncovered_lines )
179
def resolve_symlinks(orig_path):
    """
    Return *orig_path* as a normalized absolute path with every symbolic
    link expanded, checking the path one component at a time (see the
    block comment further down for why a naive readlink walk is wrong).
    """
    cur_drive, remainder = os.path.splitdrive(os.path.abspath(orig_path))
    if not cur_drive:
        cur_drive = os.path.sep
    pending = remainder.split(os.path.sep)
    resolved = [cur_drive]
    while pending:
        resolved.append(pending.pop(0))
        prefix = os.path.join(*resolved)
        if os.path.islink(prefix):
            # Replace the link component with its target, then recurse so
            # that any links embedded in the *target* are expanded too.
            resolved[-1] = os.readlink(prefix)
            link_drive, link_path = os.path.splitdrive(
                resolve_symlinks(os.path.join(*resolved)))
            if link_drive:
                cur_drive = link_drive
            resolved = [cur_drive] + link_path.split(os.path.sep)
    return os.path.join(*resolved)
200
201
def path_startswith(path, base):
    """True when *path* equals *base* or lies beneath it (component-wise,
    so "/ab" does not count as being under "/a")."""
    if not path.startswith(base):
        return False
    return len(base) == len(path) or path[len(base)] == os.path.sep
205
206
class PathAliaser(object):
    """
    Records equivalences between directory paths discovered while
    walking symlinked trees: alias -> master substitutions, the set of
    canonical ("master") directories already visited, and the spelling
    the user prefers for each master.
    """

    def __init__(self):
        self.aliases = {}          # alias prefix -> master prefix
        self.master_targets = set()  # canonical directories seen so far
        self.preferred_name = {}   # master prefix -> preferred spelling

    def master_path(self, path):
        """
        Rewrite *path* through at most one alias substitution and look
        it up among the master targets.  Returns (path, master_base,
        known); master_base is None when the path is unknown.
        """
        match_found = False
        for base, alias in self.aliases.items():
            if path_startswith(path, base):
                path = alias + path[len(base):]
                match_found = True
                break
        for master_base in self.master_targets:
            if path_startswith(path, master_base):
                return path, master_base, True
        if match_found:
            # An alias matched but its master is not registered -- that
            # should be impossible given how link_walker populates us.
            sys.stderr.write(
                "(ERROR) violating fundamental assumption while walking "
                "directory tree.\n\tPlease report this to the gcovr "
                "developers.\n" )
        return path, None, match_found

    def unalias_path(self, path):
        """Return the preferred spelling of *path* after resolving
        symlinks and alias substitutions."""
        path = resolve_symlinks(path)
        path, master_base, known_path = self.master_path(path)
        if not known_path:
            return path
        # Try and resolve the preferred name for this location
        if master_base in self.preferred_name:
            return self.preferred_name[master_base] + path[len(master_base):]
        return path

    def add_master_target(self, master):
        self.master_targets.add(master)

    def add_alias(self, target, master):
        self.aliases[target] = master

    def set_preferred(self, master, preferred):
        self.preferred_name[master] = preferred
249
# Module-level singleton shared by link_walker() and process_gcov_data()
# so symlinked directory paths stay consistent across the whole run.
aliases = PathAliaser()
251
252 # This is UGLY.  Here's why: UNIX resolves symbolic links by walking the
253 # entire directory structure.  What that means is that relative links
254 # are always relative to the actual directory inode, and not the
255 # "virtual" path that the user might have traversed (over symlinks) on
256 # the way to that directory.  Here's the canonical example:
257 #
258 #   a / b / c / testfile
259 #   a / d / e --> ../../a/b
260 #   m / n --> /a
261 #   x / y / z --> /m/n/d
262 #
263 # If we start in "y", we will see the following directory structure:
264 #   y
265 #   |-- z
266 #       |-- e
267 #           |-- c
268 #               |-- testfile
269 #
270 # The problem is that using a simple traversal based on the Python
271 # documentation:
272 #
273 #    (os.path.join(os.path.dirname(path), os.readlink(result)))
274 #
275 # will not work: we will see a link to /m/n/d from /x/y, but completely
276 # miss the fact that n is itself a link.  If we then naively attempt to
277 # apply the "c" relative link, we get an intermediate path that looks
278 # like "/m/n/d/e/../../a/b", which would get normalized to "/m/n/a/b"; a
279 # nonexistent path.  The solution is that we need to walk the original
280 # path, along with the full path of all links 1 directory at a time and
281 # check for embedded symlinks.
282 #
def link_walker(path):
    """
    Generator: like os.walk(path, topdown=True), but follows symlinked
    directories by queueing each directory-symlink encountered as a new
    walk root.  Alias/master relationships are recorded in the
    module-global 'aliases' so each physical directory is walked only
    once, even when reachable under several names.

    Yields (root, dirs, files) tuples exactly as os.walk does.
    """
    targets = [os.path.abspath(path)]
    while targets:
        target_dir = targets.pop(0)
        actual_dir = resolve_symlinks(target_dir)
        #print "target dir: %s  (%s)" % (target_dir, actual_dir)
        master_name, master_base, visited = aliases.master_path(actual_dir)
        # Physical directory already walked under another name: just
        # record the alias and skip the redundant walk.
        if visited:
            #print "  ...root already visited as %s" % master_name
            aliases.add_alias(target_dir, master_name)
            continue
        if master_name != target_dir:
            aliases.set_preferred(master_name, target_dir)
            aliases.add_alias(target_dir, master_name)
        aliases.add_master_target(master_name)
        #print "  ...master name = %s" % master_name
        #print "  ...walking %s" % target_dir
        for root, dirs, files in os.walk(target_dir, topdown=True):
            #print "    ...reading %s" % root
            for d in dirs:
                tmp = os.path.abspath(os.path.join(root, d))
                #print "    ...checking %s" % tmp
                if os.path.islink(tmp):
                    # Defer symlinked subdirectories as future walk roots.
                    #print "      ...buffering link %s" % tmp
                    targets.append(tmp)
            yield root, dirs, files
309
310
def search_file(expr, path):
    """
    Recursively search *path* (following symlinked directories via
    link_walker) and return the absolute names of every file whose
    basename matches the regular expression *expr*.  Symlinked files
    are reported via their link target.  Raises IOError when *path*
    does not exist; None or "." means the current directory.
    """
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '"+path+"'")
    matches = []
    for root, dirs, files in link_walker(path):
        for entry in files:
            if not pattern.match(entry):
                continue
            full = os.path.join(root, entry)
            if os.path.islink(full):
                matches.append(os.path.abspath(os.readlink(full)))
            else:
                matches.append(os.path.abspath(full))
    return matches
331
332
333 #
334 # Get the list of datafiles in the directories specified by the user
335 #
def get_datafiles(flist, options):
    """
    Scan each directory in *flist* and return the list of gcov data
    files to process.

    Both .gcda and .gcno files are collected, but a .gcno file is kept
    only when no matching .gcda exists: gcno alone yields all-uncovered
    results, which is still useful for compilation units the tests
    never exercised.
    """
    allfiles = []
    for directory in flist:  # renamed from 'dir': don't shadow the builtin
        if options.verbose:
            sys.stdout.write( "Scanning directory %s for gcda/gcno files...\n"
                              % (directory, ) )
        # Raw string: '\.' is a regex escape, not a string escape.
        files = search_file(r".*\.gc(da|no)$", directory)
        gcda_files = [f for f in files if f.endswith('gcda')]
        known_gcda = set(gcda_files)
        # Keep a gcno only if its sibling gcda ("...gcda") was not found.
        gcno_files = [ f for f in files if
                       f.endswith('gcno') and f[:-2]+'da' not in known_gcda ]
        if options.verbose:
            sys.stdout.write(
                "Found %d files (and will process %d)\n" %
                ( len(files), len(gcda_files) + len(gcno_files) ) )
        allfiles.extend(gcda_files)
        allfiles.extend(gcno_files)
    return allfiles
359
360
def process_gcov_data(file, covdata, options):
    """
    Parse one gcov-generated .gcov text file and fold its per-line
    results into *covdata* (dict: source filename -> CoverageData).

    Files rejected by options.filter / options.exclude are skipped.
    Raises RuntimeError when line 1 is not the expected
    "-: 0:Source:..." header.
    """
    # 'with' guarantees the handle is closed on the early filter/exclude
    # returns and on parse errors (the original code leaked it there).
    with open(file, "r") as INPUT:
        #
        # Get the filename
        #
        line = INPUT.readline()
        segments=line.split(':',3)
        if len(segments) != 4 or not segments[2].lower().strip().endswith('source'):
            raise RuntimeError('Fatal error parsing gcov file, line 1: \n\t"%s"' % line.rstrip())
        # Resolve the source name relative to where gcovr was started,
        # not the directory we chdir'ed into to run gcov.
        currdir = os.getcwd()
        os.chdir(starting_dir)
        fname = aliases.unalias_path(os.path.abspath((segments[-1]).strip()))
        os.chdir(currdir)
        if options.verbose:
            sys.stdout.write("Parsing coverage data for file %s\n" % fname)
        #
        # Return if the filename does not match the filter
        #
        if not options.filter.match(fname):
            if options.verbose:
                sys.stdout.write("  Filtering coverage data for file %s\n" % fname)
            return
        #
        # Return if the filename matches the exclude pattern
        #
        for i in range(0,len(options.exclude)):
            if options.exclude[i].match(options.filter.sub('',fname)) or \
                   options.exclude[i].match(fname) or \
                   options.exclude[i].match(os.path.abspath(fname)):
                if options.verbose:
                    sys.stdout.write("  Excluding coverage data for file %s\n" % fname)
                return
        #
        # Parse each line, and record the lines
        # that are uncovered
        #
        noncode = set()
        uncovered = set()
        uncovered_exceptional = set()
        covered = {}
        branches = {}
        lineno = 0
        for line in INPUT:
            segments=line.split(":",2)
            tmp = segments[0].strip()
            if len(segments) > 1:
                try:
                    lineno = int(segments[1].strip())
                except ValueError:  # narrowed from bare 'except'
                    pass # keep previous line number!

            if tmp[0] == '#':
                # "#####": executable line that was never reached
                uncovered.add( lineno )
            elif tmp[0] == '=':
                # "=====": reachable only on an exceptional path
                uncovered_exceptional.add( lineno )
            elif tmp[0] in "0123456789":
                covered[lineno] = int(segments[0].strip())
            elif tmp[0] == '-':
                # remember certain non-executed lines
                code = segments[2].strip()
                if len(code) == 0 or code == "{" or code == "}" or \
                   code.startswith("//") or code == 'else':
                    noncode.add( lineno )
            elif tmp.startswith('branch'):
                fields = line.split()
                try:
                    count = int(fields[3])
                    branches.setdefault(lineno, {})[int(fields[1])] = count
                except (IndexError, ValueError):  # narrowed from bare 'except'
                    # We ignore branches that were "never executed"
                    pass
            elif tmp.startswith('call'):
                pass
            elif tmp.startswith('function'):
                pass
            elif tmp[0] == 'f':
                pass
            else:
                sys.stderr.write(
                    "(WARNING) Unrecognized GCOV output: '%s'\n"
                    "\tThis is indicitive of a gcov output parse error.\n"
                    "\tPlease report this to the gcovr developers." % tmp )
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here.  Otherwise,
    # initialize covdata
    #
    if not fname in covdata:
        covdata[fname] = CoverageData(fname,uncovered,uncovered_exceptional,covered,branches,noncode)
    else:
        covdata[fname].update(uncovered,uncovered_exceptional,covered,branches,noncode)
468
469 #
470 # Process a datafile (generated by running the instrumented application)
471 # and run gcov with the corresponding arguments
472 #
473 # This is trickier than it sounds: The gcda/gcno files are stored in the
474 # same directory as the object files; however, gcov must be run from the
475 # same directory where gcc/g++ was run.  Normally, the user would know
476 # where gcc/g++ was invoked from and could tell gcov the path to the
477 # object (and gcda) files with the --object-directory command.
478 # Unfortunately, we do everything backwards: gcovr looks for the gcda
479 # files and then has to infer the original gcc working directory.
480 #
481 # In general, (but not always) we can assume that the gcda file is in a
482 # subdirectory of the original gcc working directory, so we will first
483 # try ".", and on error, move up the directory tree looking for the
484 # correct working directory (letting gcov's own error codes dictate when
485 # we hit the right directory).  This covers 90+% of the "normal" cases.
486 # The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
487 # the object directory was a peer (not a parent/child) of the cwd.  In
488 # this case, things are really tough.  We accept an argument
489 # (--object-directory) that SHOULD BE THE SAME as the one provided to
490 # gcc.  We will then walk that path (backwards) in the hopes of
491 # identifying the original gcc working directory (there is a bit of
492 # trial-and-error here)
493 #
def process_datafile(filename, covdata, options):
    """
    Run gcov on one .gcda/.gcno data file and merge the resulting .gcov
    output into *covdata*.

    gcov must run from the directory where the compiler was invoked, so
    this builds a list of candidate working directories (from
    --object-directory when given, otherwise every ancestor of the data
    file) and tries each until gcov stops complaining about missing
    source/graph files.  See the long block comment above for the full
    rationale.
    """
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname,fname) = os.path.split(abs_filename)
    #(name,ext) = os.path.splitext(base)

    potential_wd = []
    errors=[]
    Done = False

    if options.objdir:
        # Count how many trailing path components --object-directory
        # shares with the gcda file's own path.
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1*idx] != src_components[-1*idx]:
                break
            idx += 1
        if idx > len(components):
            pass # a parent dir; the normal process will find it
        elif components[-1*idx] == '..':
            # The objdir escapes upward ("../..."): enumerate every
            # directory those '..' components could have referred to.
            # NOTE(review): os.path.join() is handed a *list* here; this
            # looks like it should be os.sep.join(...) -- confirm before
            # relying on the '..' objdir case.
            dirs = [ os.path.join(src_components[:len(src_components)-idx+1]) ]
            while idx <= len(components) and components[-1*idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d,f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [ options.objdir ]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [ os.path.join(x, options.objdir) for x in
                        [os.path.dirname(abs_filename), os.getcwd()] ]
            potential_wd = [ testdir for testdir in tmp
                             if os.path.isdir(testdir) ]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Revert to the normal 
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [ gcov_cmd, abs_filename,
            "--branch-counts", "--branch-probabilities", "--preserve-paths", 
            '--object-directory', dirname ]

    # NB: We are lazy English speakers, so we will only parse English output
    env = dict(os.environ)
    env['LC_ALL'] = 'en_US'
    

    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entires are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0)) 
        
        
        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            sys.stdout.write("Running gcov: '%s' in '%s'\n" % ( ' '.join(cmd), os.getcwd() ))
        (out, err) = subprocess.Popen( cmd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE ).communicate()
        out=out.decode('utf-8')
        err=err.decode('utf-8')

        # find the files that gcov created
        gcov_files = {'active':[], 'filter':[], 'exclude':[]}
        for line in out.splitlines():
            found = output_re.search(line.strip())
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
                    gcov_files['filter'].append(fname)
                    continue
                exclude=False
                for i in range(0,len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
                           options.gcov_exclude[i].match(fname) or \
                           options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude=True
                        break
                if not exclude:
                    gcov_files['active'].append(fname)
                elif options.verbose:
                    # NOTE(review): the 'exclude' append only happens under
                    # --verbose, so excluded .gcov files are not recorded
                    # (and thus not cleaned up below) in quiet runs -- confirm
                    # whether that is intended.
                    sys.stdout.write("Excluding gcov file %s\n" % fname)
                    gcov_files['exclude'].append(fname)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files['active']:
                process_gcov_data(fname, covdata, options)
            Done = True

        # Unless --keep was given, delete the .gcov scratch files gcov
        # produced in the current working directory.
        if not options.keep:
            for group in gcov_files.values():
                for fname in group:
                    if os.path.exists(fname):
                        # Only remove files that actually exist.
                        os.remove(fname)

    os.chdir(starting_dir)
    # --delete: remove the .gcda data file itself (never .gcno).
    if options.delete:
        if not abs_filename.endswith('gcno'):
            os.remove(abs_filename)
        
    if not Done:
        sys.stderr.write(
            "(WARNING) GCOV produced the following errors processing %s:\n"
            "\t   %s" 
            "\t(gcovr could not infer a working directory that resolved it.)\n"
            % ( filename, "\t   ".join(errors) ) )
635
636 #
637 # Produce the classic gcovr text report
638 #
def print_text_report(covdata):
    """
    Write the classic plain-text gcovr summary for *covdata* to
    options.output (or stdout), one row per source file plus a TOTAL
    footer.  Row order follows --sort-uncovered / --sort-percentage,
    defaulting to alphabetical.
    """
    def _num_uncovered(key):
        total, covered, _ = covdata[key].coverage()
        return total - covered

    def _percent_uncovered(key):
        total, covered, _ = covdata[key].coverage()
        if covered:
            return -1.0 * covered / total
        return total or 1e6

    def _alpha(key):
        return key

    out = open(options.output, 'w') if options.output else sys.stdout

    rule = "-" * 78 + '\n'
    # Header
    out.write(rule)
    if options.show_branch:
        col_a, col_b = "Branches", "Taken"
    else:
        col_a, col_b = "Lines", "Exec"
    out.write("File".ljust(40) + col_a.rjust(8) + col_b.rjust(8)
              + "  Cover   " + "Missing" + "\n")
    out.write(rule)

    # Data
    if options.sort_uncovered:
        sort_key = _num_uncovered
    elif options.sort_percent:
        sort_key = _percent_uncovered
    else:
        sort_key = _alpha
    total_lines = 0
    total_covered = 0
    for key in sorted(covdata.keys(), key=sort_key):
        t, n, txt = covdata[key].summary()
        total_lines += t
        total_covered += n
        out.write(txt + '\n')

    # Footer & summary
    out.write(rule)
    percent = str(int(100.0 * total_covered / total_lines)) if total_lines else "--"
    out.write("TOTAL".ljust(40) + str(total_lines).rjust(8) +
              str(total_covered).rjust(8) + str(percent).rjust(6) + "%" + '\n')
    out.write(rule)

    # Close logfile
    if options.output:
        out.close()
686
687 #
688 # Produce an XML report in the Cobertura format
689 #
690 def print_xml_report(covdata):
691     branchTotal = 0
692     branchCovered = 0
693     lineTotal = 0
694     lineCovered = 0
695
696     options.show_branch = True
697     for key in covdata.keys():
698         (total, covered, percent) = covdata[key].coverage()
699         branchTotal += total
700         branchCovered += covered
701
702     options.show_branch = False
703     for key in covdata.keys():
704         (total, covered, percent) = covdata[key].coverage()
705         lineTotal += total
706         lineCovered += covered
707     
708     impl = xml.dom.minidom.getDOMImplementation()
709     docType = impl.createDocumentType(
710         "coverage", None,
711         "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
712     doc = impl.createDocument(None, "coverage", docType)
713     root = doc.documentElement
714     root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
715                        str(float(lineCovered) / lineTotal) )
716     root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
717                        str(float(branchCovered) / branchTotal) )
718     root.setAttribute( "timestamp", str(int(time.time())) )
719     root.setAttribute( "version", "gcovr %s" % (version_str(),) )
720
721     # Generate the <sources> element: this is either the root directory
722     # (specified by --root), or the CWD.
723     sources = doc.createElement("sources")
724     root.appendChild(sources)
725
726     # Generate the coverage output (on a per-package basis)
727     packageXml = doc.createElement("packages")
728     root.appendChild(packageXml)
729     packages = {}
730     source_dirs = set()
731
732     keys = list(covdata.keys())
733     keys.sort()
734     for f in keys:
735         data = covdata[f]
736         dir = options.filter.sub('',f)
737         if f.endswith(dir):
738             src_path = f[:-1*len(dir)]
739             if len(src_path) > 0:
740                 while dir.startswith(os.path.sep):
741                     src_path += os.path.sep
742                     dir = dir[len(os.path.sep):]
743                 source_dirs.add(src_path)
744         else:
745             # Do no truncation if the filter does not start matching at
746             # the beginning of the string
747             dir = f
748         (dir, fname) = os.path.split(dir)
749         
750         package = packages.setdefault(
751             dir, [ doc.createElement("package"), {},
752                    0, 0, 0, 0 ] )
753         c = doc.createElement("class")
754         lines = doc.createElement("lines")
755         c.appendChild(lines)
756
757         class_lines = 0
758         class_hits = 0
759         class_branches = 0
760         class_branch_hits = 0
761         for line in data.all_lines:
762             hits = data.covered.get(line, 0)
763             class_lines += 1
764             if hits > 0:
765                 class_hits += 1
766             l = doc.createElement("line")
767             l.setAttribute("number", str(line))
768             l.setAttribute("hits", str(hits))
769             branches = data.branches.get(line)
770             if branches is None:
771                 l.setAttribute("branch", "false")
772             else:
773                 b_hits = 0
774                 for v in branches.values():
775                     if v > 0:
776                         b_hits += 1
777                 coverage = 100*b_hits/len(branches)
778                 l.setAttribute("branch", "true")
779                 l.setAttribute( "condition-coverage",
780                                 "%i%% (%i/%i)" %
781                                 (coverage, b_hits, len(branches)) )
782                 cond = doc.createElement('condition')
783                 cond.setAttribute("number", "0")
784                 cond.setAttribute("type", "jump")
785                 cond.setAttribute("coverage", "%i%%" % ( coverage ) )
786                 class_branch_hits += b_hits
787                 class_branches += float(len(branches))
788                 conditions = doc.createElement("conditions")
789                 conditions.appendChild(cond)
790                 l.appendChild(conditions)
791                 
792             lines.appendChild(l)
793
794         className = fname.replace('.', '_')
795         c.setAttribute("name", className)
796         c.setAttribute("filename", os.path.join(dir, fname))
797         c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
798         c.setAttribute( "branch-rate",
799                         str(class_branch_hits / (1.0*class_branches or 1.0)) )
800         c.setAttribute("complexity", "0.0")
801
802         package[1][className] = c
803         package[2] += class_hits
804         package[3] += class_lines
805         package[4] += class_branch_hits
806         package[5] += class_branches
807
808     for packageName, packageData in packages.items():
809         package = packageData[0];
810         packageXml.appendChild(package)
811         classes = doc.createElement("classes")
812         package.appendChild(classes)
813         classNames = list(packageData[1].keys())
814         classNames.sort()
815         for className in classNames:
816             classes.appendChild(packageData[1][className])
817         package.setAttribute("name", packageName.replace(os.sep, '.'))
818         package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
819         package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
820         package.setAttribute("complexity", "0.0")
821
822
823     # Populate the <sources> element: this is either the root directory
824     # (specified by --root), or relative directories based
825     # on the filter, or the CWD
826     if options.root is not None:
827         source = doc.createElement("source")
828         source.appendChild(doc.createTextNode(options.root.strip()))
829         sources.appendChild(source)
830     elif len(source_dirs) > 0:
831         cwd = os.getcwd()
832         for d in source_dirs:
833             source = doc.createElement("source")
834             if d.startswith(cwd):
835                 reldir = d[len(cwd):].lstrip(os.path.sep)
836             elif cwd.startswith(d):
837                 i = 1
838                 while normpath(d) != normpath(os.path.join(*tuple([cwd]+['..']*i))):
839                     i += 1
840                 reldir = os.path.join(*tuple(['..']*i))
841             else:
842                 reldir = d
843             source.appendChild(doc.createTextNode(reldir.strip()))
844             sources.appendChild(source)
845     else:
846         source = doc.createElement("source")
847         source.appendChild(doc.createTextNode('.'))
848         sources.appendChild(source)
849
850     if options.prettyxml:
851         import textwrap
852         lines = doc.toprettyxml(" ").split('\n')
853         for i in xrange(len(lines)):
854             n=0
855             while n < len(lines[i]) and lines[i][n] == " ":
856                 n += 1
857             lines[i] = "\n".join(textwrap.wrap(lines[i], 78, break_long_words=False, break_on_hyphens=False, subsequent_indent=" "+ n*" "))
858         xmlString = "\n".join(lines)
859         #print textwrap.wrap(doc.toprettyxml(" "), 80)
860     else:
861         xmlString = doc.toprettyxml(indent="")
862     if options.output is None:
863         sys.stdout.write(xmlString+'\n')
864     else:
865         OUTPUT = open(options.output, 'w')
866         OUTPUT.write(xmlString +'\n')
867         OUTPUT.close()
868
869
870 ##
871 ## MAIN
872 ##
873
#
# Create option parser
#
# Option definitions are kept in one table so the flags, destinations, and
# defaults can be scanned at a glance; each entry is (option strings,
# keyword arguments for OptionParser.add_option).
_OPTION_TABLE = [
    (("--version",),
     dict(help="Print the version number, then exit",
          action="store_true", dest="version", default=False)),
    (("-v", "--verbose"),
     dict(help="Print progress messages",
          action="store_true", dest="verbose", default=False)),
    (("--object-directory",),
     dict(help="Specify the directory that contains the gcov data files.  gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run.  Normally, gcovr can guess correctly.  This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
          action="store", dest="objdir", default=None)),
    (("-o", "--output"),
     dict(help="Print output to this filename",
          action="store", dest="output", default=None)),
    (("-k", "--keep"),
     dict(help="Keep the temporary *.gcov files generated by gcov.  By default, these are deleted.",
          action="store_true", dest="keep", default=False)),
    (("-d", "--delete"),
     dict(help="Delete the coverage files after they are processed.  These are generated by the users's program, and by default gcovr does not remove these files.",
          action="store_true", dest="delete", default=False)),
    (("-f", "--filter"),
     dict(help="Keep only the data files that match this regular expression",
          action="store", dest="filter", default=None)),
    (("-e", "--exclude"),
     dict(help="Exclude data files that match this regular expression",
          action="append", dest="exclude", default=[])),
    (("--gcov-filter",),
     dict(help="Keep only gcov data files that match this regular expression",
          action="store", dest="gcov_filter", default=None)),
    (("--gcov-exclude",),
     dict(help="Exclude gcov data files that match this regular expression",
          action="append", dest="gcov_exclude", default=[])),
    (("-r", "--root"),
     dict(help="Defines the root directory.  This is used to filter the files, and to standardize the output.",
          action="store", dest="root", default=None)),
    (("-x", "--xml"),
     dict(help="Generate XML instead of the normal tabular output.",
          action="store_true", dest="xml", default=False)),
    (("--xml-pretty",),
     dict(help="Generate pretty XML instead of the normal dense format.",
          action="store_true", dest="prettyxml", default=False)),
    (("-b", "--branches"),
     dict(help="Tabulate the branch coverage instead of the line coverage.",
          action="store_true", dest="show_branch", default=None)),
    (("-u", "--sort-uncovered"),
     dict(help="Sort entries by increasing number of uncovered lines.",
          action="store_true", dest="sort_uncovered", default=None)),
    (("-p", "--sort-percentage"),
     dict(help="Sort entries by decreasing percentage of covered lines.",
          action="store_true", dest="sort_percent", default=None)),
]

parser = OptionParser()
for _flags, _kwargs in _OPTION_TABLE:
    parser.add_option(*_flags, **_kwargs)
parser.usage = "gcovr [options]"
parser.description = "A utility to run gcov and generate a simple report that summarizes the coverage"
#
# Process options
#
# Note: the full sys.argv (including argv[0]) is handed to optparse, so the
# program name lands in `args` and is skipped later.
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    banner = (
        "gcovr %s\n"
        "\n"
        "Copyright (2008) Sandia Corporation. Under the terms of Contract\n"
        "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government\n"
        "retains certain rights in this software.\n" % (version_str(),))
    sys.stdout.write(banner)
    sys.exit(0)
if options.objdir:
    # Normalize both separator styles, then collapse any runs of doubled
    # separators; if this differs from normpath's answer, the path contains
    # relative references ("..") that can confuse gcov path detection.
    collapsed = options.objdir.replace('/', os.sep).replace('\\', os.sep)
    doubled = os.sep + os.sep
    while doubled in collapsed:
        collapsed = collapsed.replace(doubled, os.sep)
    if normpath(options.objdir) != collapsed:
        sys.stderr.write(
            "(WARNING) relative referencing in --object-directory.\n"
            "\tthis could cause strange errors when gcovr attempts to\n"
            "\tidentify the original gcc working directory.\n")
#
# Setup filters
#
# Compile the user-supplied exclusion patterns in place.
options.exclude[:] = [re.compile(pattern) for pattern in options.exclude]
if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    # --root was given instead of --filter: derive a filter that matches
    # everything under the (absolute) root directory.
    if not options.root:
        sys.stderr.write(
            "(ERROR) empty --root option.\n"
            "\tRoot specifies the path to the root directory of your project.\n"
            "\tThis option cannot be an empty string.\n")
        sys.exit(1)
    options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
if options.filter is None:
    # No filter at all: match everything.
    options.filter = re.compile('')
#
options.gcov_exclude[:] = [re.compile(pattern) for pattern in options.gcov_exclude]
if options.gcov_filter is None:
    options.gcov_filter = re.compile('')
else:
    options.gcov_filter = re.compile(options.gcov_filter)
#
# Get data files
#
# args[0] is the program name; any remaining arguments are the directories
# (or files) to search.  With no explicit paths, search the current directory.
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
# Loop variable renamed from `file`, which shadowed the builtin of that name.
for datafile in datafiles:
    process_datafile(datafile, covdata, options)
if options.verbose:
    # Fixed typo in the progress message ("coveraged" -> "coverage").
    sys.stdout.write("Gathered coverage data for "+str(len(covdata))+" files\n")
#
# Print report
#
if options.xml or options.prettyxml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)