source: gcovr/trunk/scripts/gcovr @ 2774

Revision 2774, 35.2 KB checked in by jdsiiro, 2 years ago

Tagging gcovr 2.4

  • Property svn:executable set to *
  • Property svn:keywords set to Date Revision
#! /usr/bin/env python
#
# A report generator for gcov 3.4
#
# This routine generates a format that is similar to the format generated
# by the Python coverage.py module.  This code is similar to the
# data processing performed by lcov's geninfo command.  However, we
# don't worry about parsing the *.gcno files directly, and backwards
# compatibility for older versions of gcov is not supported.
#
# Outstanding issues
#   - verify that gcov 3.4 or newer is being used
#   - verify support for symbolic links
#
# gcovr is a FAST project.  For documentation, bug reporting, and
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
#
# _________________________________________________________________________
#
# FAST: Utilities for Agile Software Development
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# For more information, see the FAST README.txt file.
#
# $Revision: 2774 $
# $Date: 2012-04-13 17:26:55 -0700 (Fri, 13 Apr 2012) $
# _________________________________________________________________________
#

import copy
import glob
import os
import re
import subprocess
import sys
import time
import xml.dom.minidom

from optparse import OptionParser
from string import Template
from os.path import normpath

__version__ = "2.5-prerelease"
src_revision = "$Revision: 2774 $"
gcov_cmd = "gcov"

output_re = re.compile("[Cc]reating [`'](.*)'$")
source_re = re.compile("cannot open (source|graph) file")

def version_str():
    ans = __version__
    m = re.match('\$Revision:\s*(\S+)\s*\$', src_revision)
    if m:
        ans = ans + " (r%s)" % (m.group(1))
    return ans

#
# Container object for coverage statistics
#
class CoverageData(object):

    def __init__(self, fname, uncovered, covered, branches, noncode):
        self.fname=fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.covered   = copy.copy(covered)
        self.noncode   = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(covered.keys())
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, covered, branches, noncode):
        self.all_lines.update(uncovered)
        self.all_lines.update(covered.keys())
        self.uncovered.update(uncovered)
        self.noncode.intersection_update(noncode)
        for k in covered.keys():
            self.covered[k] = self.covered.get(k,0) + covered[k]
        for k in branches.keys():
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(self.covered.keys())

    def uncovered_str(self):
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in self.branches.keys():
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break

            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""

        tmp.sort()
        first = None
        last = None
        ranges=[]
        for item in tmp:
            if last is None:
                first=item
                last=item
            elif item == (last+1):
                last=item
            else:
                if len(self.noncode.intersection(range(last+1,item))) \
                       == item - last - 1:
                    last = item
                    continue

                if first==last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first)+"-"+str(last))
                first=item
                last=item
        if first==last:
            ranges.append(str(first))
        else:
            ranges.append(str(first)+"-"+str(last))
        return ",".join(ranges)
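    # Illustrative example (not from the original source): with
    # uncovered = {3, 4, 5, 9} and noncode = {6, 7, 8}, the 6-8 gap is all
    # non-code, so uncovered_str() collapses the result to "3-9"; if the
    # gap contained an executable (covered) line, it would report "3-5,9".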

    def coverage(self):
        if ( options.show_branch ):
            total = 0
            cover = 0
            for line in self.branches.keys():
                for branch in self.branches[line].keys():
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0*cover/total)) or "--"
        return (total, cover, percent)

    def summary(self):
        tmp = options.filter.sub('',self.fname)
        if not self.fname.endswith(tmp):
            # Don't truncate if the filter did not match at the
            # beginning of the file name
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp=tmp+"\n"+" "*40

        (total, cover, percent) = self.coverage()
        return ( total, cover,
                 tmp + str(total).rjust(8) + str(cover).rjust(8) + \
                 percent.rjust(6) + "%   " + self.uncovered_str() )


def resolve_symlinks(orig_path):
    """
    Return the normalized absolute path name with all symbolic links resolved
    """
    drive,tmp = os.path.splitdrive(os.path.abspath(orig_path))
    if not drive:
        drive = os.path.sep
    parts = tmp.split(os.path.sep)
    actual_path = [drive]
    while parts:
        actual_path.append(parts.pop(0))
        if not os.path.islink(os.path.join(*actual_path)):
            continue
        actual_path[-1] = os.readlink(os.path.join(*actual_path))
        tmp_drive, tmp_path = os.path.splitdrive(
            resolve_symlinks(os.path.join(*actual_path)) )
        if tmp_drive:
            drive = tmp_drive
        actual_path = [drive] + tmp_path.split(os.path.sep)
    return os.path.join(*actual_path)


def path_startswith(path, base):
    return path.startswith(base) and (
        len(base) == len(path) or path[len(base)] == os.path.sep )


class PathAliaser(object):
    def __init__(self):
        self.aliases = {}
        self.master_targets = set()
        self.preferred_name = {}

    def master_path(self, path):
        match_found = False
        while True:
            for base, alias in self.aliases.items():
                if path_startswith(path, base):
                    path = alias + path[len(base):]
                    match_found = True
                    break
            for master_base in self.master_targets:
                if path_startswith(path, master_base):
                    return path, master_base, True
            if match_found:
                sys.stderr.write(
                    "(ERROR) violating fundamental assumption while walking "
                    "directory tree.\n\tPlease report this to the gcovr "
                    "developers.\n" )
            return path, None, match_found

    def unalias_path(self, path):
        path = resolve_symlinks(path)
        path, master_base, known_path = self.master_path(path)
        if not known_path:
            return path
        # Try and resolve the preferred name for this location
        if master_base in self.preferred_name:
            return self.preferred_name[master_base] + path[len(master_base):]
        return path

    def add_master_target(self, master):
        self.master_targets.add(master)

    def add_alias(self, target, master):
        self.aliases[target] = master

    def set_preferred(self, master, preferred):
        self.preferred_name[master] = preferred

aliases = PathAliaser()

# This is UGLY.  Here's why: UNIX resolves symbolic links by walking the
# entire directory structure.  What that means is that relative links
# are always relative to the actual directory inode, and not the
# "virtual" path that the user might have traversed (over symlinks) on
# the way to that directory.  Here's the canonical example:
#
#   a / b / c / testfile
#   a / d / e --> ../../a/b
#   m / n --> /a
#   x / y / z --> /m/n/d
#
# If we start in "y", we will see the following directory structure:
#   y
#   |-- z
#       |-- e
#           |-- c
#               |-- testfile
#
# The problem is that using a simple traversal based on the Python
# documentation:
#
#    (os.path.join(os.path.dirname(path), os.readlink(result)))
#
# will not work: we will see a link to /m/n/d from /x/y, but completely
# miss the fact that n is itself a link.  If we then naively attempt to
# apply the "e" relative link, we get an intermediate path that looks
# like "/m/n/d/e/../../a/b", which would get normalized to "/m/n/a/b"; a
# nonexistent path.  The solution is that we need to walk the original
# path, along with the full path of all links, one directory at a time,
# and check for embedded symlinks.
#
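# Illustrative walk-through (not part of the original code): for the
# example above, resolve_symlinks() expands each path component before
# descending, e.g.
#
#   /x/y/z    -> /m/n/d   (z is a link)
#   /m/n      -> /a       (n is itself a link, so /m/n/d is really /a/d)
#   /a/d/e    -> /a/b     (the relative link e is resolved in place)
#
# so y/z/e/c/testfile correctly maps to /a/b/c/testfile, which the naive
# dirname+readlink join above would miss.
#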
def link_walker(path):
    targets = [os.path.abspath(path)]
    while targets:
        target_dir = targets.pop(0)
        actual_dir = resolve_symlinks(target_dir)
        #print "target dir: %s  (%s)" % (target_dir, actual_dir)
        master_name, master_base, visited = aliases.master_path(actual_dir)
        if visited:
            #print "  ...root already visited as %s" % master_name
            aliases.add_alias(target_dir, master_name)
            continue
        if master_name != target_dir:
            aliases.set_preferred(master_name, target_dir)
            aliases.add_alias(target_dir, master_name)
        aliases.add_master_target(master_name)
        #print "  ...master name = %s" % master_name
        #print "  ...walking %s" % target_dir
        for root, dirs, files in os.walk(target_dir, topdown=True):
            #print "    ...reading %s" % root
            for d in dirs:
                tmp = os.path.abspath(os.path.join(root, d))
                #print "    ...checking %s" % tmp
                if os.path.islink(tmp):
                    #print "      ...buffering link %s" % tmp
                    targets.append(tmp)
            yield root, dirs, files


def search_file(expr, path):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '"+path+"'")
    for root, dirs, files in link_walker(path):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root,name)
                if os.path.islink(name):
                    ans.append( os.path.abspath(os.readlink(name)) )
                else:
                    ans.append( os.path.abspath(name) )
    return ans


#
# Get the list of datafiles in the directories specified by the user
#
def get_datafiles(flist, options):
    allfiles=[]
    for dir in flist:
        if options.verbose:
            sys.stdout.write( "Scanning directory %s for gcda/gcno files...\n"
                              % (dir, ) )
        files = search_file(".*\.gc(da|no)$", dir)
        # gcno files will *only* produce uncovered results; however,
        # that is useful information for the case where a compilation
        # unit is never actually exercised by the test code.  So, we
        # will process gcno files, but ONLY if there is no corresponding
        # gcda file.
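        # Illustrative example (not from the original source): if the scan
        # finds foo.gcda, foo.gcno and bar.gcno, then foo.gcda and bar.gcno
        # are processed, while foo.gcno is skipped because its gcda exists.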
        gcda_files = [file for file in files if file.endswith('gcda')]
        tmp = set(gcda_files)
        gcno_files = [ file for file in files if
                       file.endswith('gcno') and file[:-2]+'da' not in tmp ]
        if options.verbose:
            sys.stdout.write(
                "Found %d files (and will process %d)\n" %
                ( len(files), len(gcda_files) + len(gcno_files) ) )
        allfiles.extend(gcda_files)
        allfiles.extend(gcno_files)
    return allfiles


def process_gcov_data(file, covdata, options):
    INPUT = open(file,"r")
    #
    # Get the filename
    #
    line = INPUT.readline()
    segments=line.split(':',3)
    if len(segments) != 4 or not segments[2].lower().strip().endswith('source'):
        raise RuntimeError('Fatal error parsing gcov file, line 1: \n\t"%s"' % line.rstrip())
    fname = aliases.unalias_path(os.path.abspath((segments[-1]).strip()))
    if options.verbose:
        sys.stdout.write("Parsing coverage data for file %s\n" % fname)
    #
    # Return if the filename does not match the filter
    #
    if not options.filter.match(fname):
        if options.verbose:
            sys.stdout.write("  Filtering coverage data for file %s\n" % fname)
        return
    #
    # Return if the filename matches the exclude pattern
    #
    for i in range(0,len(options.exclude)):
        if options.exclude[i].match(options.filter.sub('',fname)) or \
               options.exclude[i].match(fname) or \
               options.exclude[i].match(os.path.abspath(fname)):
            if options.verbose:
                sys.stdout.write("  Excluding coverage data for file %s\n" % fname)
            return
    #
    # Parse each line, and record the lines
    # that are uncovered
    #
    noncode   = set()
    uncovered = set()
    covered   = {}
    branches  = {}
    #first_record=True
    lineno = 0
    for line in INPUT:
        segments=line.split(":",2)
        tmp = segments[0].strip()
        if len(segments) > 1:
            try:
                lineno = int(segments[1].strip())
            except:
                pass # keep previous line number!

        if tmp[0] == '#':
            uncovered.add( lineno )
        elif tmp[0] in "0123456789":
            covered[lineno] = int(segments[0].strip())
        elif tmp[0] == '-':
            # remember certain non-executed lines
            code = segments[2].strip()
            if len(code) == 0 or code == "{" or code == "}" or \
               code.startswith("//") or code == 'else':
                noncode.add( lineno )
        elif tmp.startswith('branch'):
            fields = line.split()
            try:
                count = int(fields[3])
                branches.setdefault(lineno, {})[int(fields[1])] = count
            except:
                # We ignore branches that were "never executed"
                pass
        elif tmp.startswith('call'):
            pass
        elif tmp.startswith('function'):
            pass
        elif tmp[0] == 'f':
            pass
            #if first_record:
                #first_record=False
                #uncovered.add(prev)
            #if prev in uncovered:
                #tokens=re.split('[ \t]+',tmp)
                #if tokens[3] != "0":
                    #uncovered.remove(prev)
            #prev = int(segments[1].strip())
            #first_record=True
        else:
            sys.stderr.write(
                "(WARNING) Unrecognized GCOV output: '%s'\n"
                "\tThis is indicative of a gcov output parse error.\n"
                "\tPlease report this to the gcovr developers." % tmp )
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here.  Otherwise,
    # initialize covdata
    #
    if fname not in covdata:
        covdata[fname] = CoverageData(fname,uncovered,covered,branches,noncode)
    else:
        covdata[fname].update(uncovered,covered,branches,noncode)
    INPUT.close()

#
# Process a datafile (generated by running the instrumented application)
# and run gcov with the corresponding arguments
#
# This is trickier than it sounds: The gcda/gcno files are stored in the
# same directory as the object files; however, gcov must be run from the
# same directory where gcc/g++ was run.  Normally, the user would know
# where gcc/g++ was invoked from and could tell gcov the path to the
# object (and gcda) files with the --object-directory option.
# Unfortunately, we do everything backwards: gcovr looks for the gcda
# files and then has to infer the original gcc working directory.
#
# In general (but not always) we can assume that the gcda file is in a
# subdirectory of the original gcc working directory, so we will first
# try ".", and on error, move up the directory tree looking for the
# correct working directory (letting gcov's own error codes dictate when
# we hit the right directory).  This covers 90+% of the "normal" cases.
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
# the object directory was a peer, not a parent/child, of the cwd).  In
# this case, things are really tough.  We accept an argument
# (--object-directory) that SHOULD BE THE SAME as the one provided to
# gcc.  We will then walk that path (backwards) in the hopes of
# identifying the original gcc working directory (there is a bit of
# trial-and-error here).
#
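# Illustrative example (not from the original source): for
# /project/build/obj/foo.gcda with no --object-directory given, gcov is
# tried from /project/build/obj, then /project/build, then /project, and
# so on up to the filesystem root, until gcov no longer reports
# "cannot open (source|graph) file" on stderr.
#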
def process_datafile(filename, covdata, options):
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname,fname) = os.path.split(abs_filename)
    #(name,ext) = os.path.splitext(base)

    potential_wd = []
    starting_dir = os.getcwd()
    errors=[]
    Done = False

    if options.objdir:
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1*idx] != src_components[-1*idx]:
                break
            idx += 1
        if idx > len(components):
            pass # a parent dir; the normal process will find it
        elif components[-1*idx] == '..':
            # rejoin the path components; os.path.join() called on a bare
            # list would return the list unchanged
            dirs = [ os.sep.join(src_components[:len(src_components)-idx+1]) ]
            while idx <= len(components) and components[-1*idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d,f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [ options.objdir ]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [ os.path.join(x, options.objdir) for x in
                        [os.path.dirname(abs_filename), os.getcwd()] ]
            potential_wd = [ testdir for testdir in tmp
                             if os.path.isdir(testdir) ]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Fall back to the normal directory search below
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [ gcov_cmd, abs_filename,
            "--branch-counts", "--branch-probabilities", "--preserve-paths",
            '--object-directory', dirname ]
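    # Roughly the equivalent of running (illustrative paths):
    #   gcov /path/to/foo.gcda --branch-counts --branch-probabilities \
    #        --preserve-paths --object-directory /path/to
    # from each candidate directory in potential_wd until one succeeds.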

    # NB: We are lazy English speakers, so we will only parse English output
    env = dict(os.environ)
    env['LC_ALL'] = 'en_US'


    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entries are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0))


        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            sys.stdout.write("Running gcov: '%s' in '%s'\n" % ( ' '.join(cmd), os.getcwd() ))
        (out, err) = subprocess.Popen( cmd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE ).communicate()
        out=out.decode('utf-8')
        err=err.decode('utf-8')

        # find the files that gcov created
        gcov_files = {'active':[], 'filter':[], 'exclude':[]}
        for line in out.splitlines():
            found = output_re.search(line.strip())
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
                    gcov_files['filter'].append(fname)
                    continue
                exclude=False
                for i in range(0,len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
                           options.gcov_exclude[i].match(fname) or \
                           options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude=True
                        break
                if not exclude:
                    gcov_files['active'].append(fname)
                else:
                    if options.verbose:
                        sys.stdout.write("Excluding gcov file %s\n" % fname)
                    # track excluded files so they can be cleaned up below
                    gcov_files['exclude'].append(fname)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files['active']:
                process_gcov_data(fname, covdata, options)
            Done = True

        if not options.keep:
            for group in gcov_files.values():
                for fname in group:
                    os.remove(fname)

    os.chdir(starting_dir)
    if options.delete:
        if not abs_filename.endswith('gcno'):
            os.remove(abs_filename)

    if not Done:
        sys.stderr.write(
            "(WARNING) GCOV produced the following errors processing %s:\n"
            "\t   %s"
            "\t(gcovr could not infer a working directory that resolved it.)\n"
            % ( filename, "\t   ".join(errors) ) )

#
# Produce the classic gcovr text report
#
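# Illustrative sample of the layout produced below (alignment approximate,
# values made up):
#
#   File                                      Lines    Exec  Cover   Missing
#   src/foo.cpp                                  10       8    80%   12,14
#   TOTAL                                        10       8    80%
#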
def print_text_report(covdata):
    def _num_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        return total - covered
    def _percent_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        if covered:
            return -1.0*covered/total
        else:
            return total or 1e6
    def _alpha(key):
        return key

    if options.output:
        OUTPUT = open(options.output,'w')
    else:
        OUTPUT = sys.stdout
    total_lines=0
    total_covered=0
    # Header
    OUTPUT.write("-"*78 + '\n')
    a = options.show_branch and "Branch" or "Lines"
    b = options.show_branch and "Taken" or "Exec"
    OUTPUT.write("File".ljust(40) + a.rjust(8) + b.rjust(8)+ "  Cover   Missing\n")
    OUTPUT.write("-"*78 + '\n')

    # Data
    keys = list(covdata.keys())
    keys.sort(key=options.sort_uncovered and _num_uncovered or \
              options.sort_percent and _percent_uncovered or _alpha)
    for key in keys:
        (t, n, txt) = covdata[key].summary()
        total_lines += t
        total_covered += n
        OUTPUT.write(txt + '\n')

    # Footer & summary
    OUTPUT.write("-"*78 + '\n')
    percent = total_lines and str(int(100.0*total_covered/total_lines)) or "--"
    OUTPUT.write("TOTAL".ljust(40) + str(total_lines).rjust(8) + \
          str(total_covered).rjust(8) + str(percent).rjust(6)+"%" + '\n')
    OUTPUT.write("-"*78 + '\n')

    # Close logfile
    if options.output:
        OUTPUT.close()

#
# Produce an XML report in the Cobertura format
#
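# Illustrative sketch (not from the original source) of the element
# hierarchy written below, per the Cobertura coverage-03 DTD (condition
# details omitted):
#
#   <coverage line-rate="..." branch-rate="..." timestamp="..." version="...">
#     <sources><source>...</source></sources>
#     <packages>
#       <package name="..." line-rate="..." branch-rate="..." complexity="0.0">
#         <classes>
#           <class name="..." filename="..." line-rate="..." branch-rate="...">
#             <lines><line number="..." hits="..." branch="..."/></lines>
#           </class>
#         </classes>
#       </package>
#     </packages>
#   </coverage>
#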
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
                       str(float(lineCovered) / lineTotal) )
    root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
                       str(float(branchCovered) / branchTotal) )
    root.setAttribute( "timestamp", str(int(time.time())) )
    root.setAttribute( "version", "gcovr %s" % (version_str(),) )

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = list(covdata.keys())
    keys.sort()
    for f in keys:
        data = covdata[f]
        dir = options.filter.sub('',f)
        if f.endswith(dir):
            src_path = f[:-1*len(dir)]
            if len(src_path) > 0:
                while dir.startswith(os.path.sep):
                    src_path += os.path.sep
                    dir = dir[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Don't truncate if the filter did not match at the
            # beginning of the file name
            dir = f
        (dir, fname) = os.path.split(dir)

        package = packages.setdefault(
            dir, [ doc.createElement("package"), {},
                   0, 0, 0, 0 ] )
        c = doc.createElement("class")
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in branches.values():
                    if v > 0:
                        b_hits += 1
                coverage = 100*b_hits/len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute( "condition-coverage",
                                "%i%% (%i/%i)" %
                                (coverage, b_hits, len(branches)) )
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % ( coverage ) )
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)

            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(dir, fname))
        c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
        c.setAttribute( "branch-rate",
                        str(class_branch_hits / (1.0*class_branches or 1.0)) )
        c.setAttribute("complexity", "0.0")

        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    for packageName, packageData in packages.items():
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = list(packageData[1].keys())
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
        package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
        package.setAttribute("complexity", "0.0")


    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
            elif cwd.startswith(d):
                i = 1
                while normpath(d) != \
                          normpath(os.path.join(*tuple([cwd]+['..']*i))):
                    i += 1
                reldir = os.path.join(*tuple(['..']*i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

    xmlString = doc.toprettyxml()
    #xml.dom.ext.PrettyPrint(doc)
    if options.output is None:
        sys.stdout.write(xmlString+'\n')
    else:
        OUTPUT = open(options.output, 'w')
        OUTPUT.write(xmlString +'\n')
        OUTPUT.close()


##
## MAIN
##

#
# Create option parser
#
parser = OptionParser()
parser.add_option("--version",
        help="Print the version number, then exit",
        action="store_true",
        dest="version",
        default=False)
parser.add_option("-v","--verbose",
        help="Print progress messages",
        action="store_true",
        dest="verbose",
        default=False)
parser.add_option('--object-directory',
        help="Specify the directory that contains the gcov data files.  gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run.  Normally, gcovr can guess correctly.  This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
        action="store",
        dest="objdir",
        default=None)
parser.add_option("-o","--output",
        help="Print output to this filename",
        action="store",
        dest="output",
        default=None)
parser.add_option("-k","--keep",
        help="Keep temporary gcov files",
        action="store_true",
        dest="keep",
        default=False)
parser.add_option("-d","--delete",
        help="Delete the coverage files after they are processed",
        action="store_true",
        dest="delete",
        default=False)
parser.add_option("-f","--filter",
        help="Keep only the data files that match this regular expression",
        action="store",
        dest="filter",
        default=None)
parser.add_option("-e","--exclude",
        help="Exclude data files that match this regular expression",
        action="append",
        dest="exclude",
        default=[])
parser.add_option("--gcov-filter",
        help="Keep only gcov data files that match this regular expression",
        action="store",
        dest="gcov_filter",
        default=None)
parser.add_option("--gcov-exclude",
        help="Exclude gcov data files that match this regular expression",
        action="append",
        dest="gcov_exclude",
        default=[])
parser.add_option("-r","--root",
        help="Defines the root directory.  This is used to filter the files, and to standardize the output.",
        action="store",
        dest="root",
        default=None)
parser.add_option("-x","--xml",
        help="Generate XML instead of the normal tabular output.",
        action="store_true",
        dest="xml",
        default=None)
parser.add_option("-b","--branches",
        help="Tabulate the branch coverage instead of the line coverage.",
        action="store_true",
        dest="show_branch",
        default=None)
parser.add_option("-u","--sort-uncovered",
        help="Sort entries by increasing number of uncovered lines.",
        action="store_true",
        dest="sort_uncovered",
        default=None)
parser.add_option("-p","--sort-percentage",
        help="Sort entries by decreasing percentage of covered lines.",
        action="store_true",
        dest="sort_percent",
        default=None)
parser.usage = "gcovr [options]"
parser.description = "A utility to run gcov and generate a simple report that summarizes the coverage."
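#
# Typical invocations (illustrative):
#   gcovr -r .                          # text summary for the current project
#   gcovr -r . --branches               # branch coverage instead of line coverage
#   gcovr -r . --xml -o coverage.xml    # Cobertura-style XML report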
#
# Process options
#
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    sys.stdout.write(
        "gcovr %s\n"
        "\n"
        "Copyright (2008) Sandia Corporation. Under the terms of Contract\n"
        "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government\n"
        "retains certain rights in this software.\n"
        % (version_str(),) )
    sys.exit(0)
if options.objdir:
    tmp = options.objdir.replace('/',os.sep).replace('\\',os.sep)
    while os.sep+os.sep in tmp:
        tmp = tmp.replace(os.sep+os.sep, os.sep)
    if normpath(options.objdir) != tmp:
        sys.stderr.write(
            "(WARNING) relative referencing in --object-directory.\n"
            "\tthis could cause strange errors when gcovr attempts to\n"
            "\tidentify the original gcc working directory.\n")
#
# Setup filters
#
for i in range(0,len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])
if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    if not options.root:
        sys.stderr.write(
            "(ERROR) empty --root option.\n"
            "\tRoot specifies the path to the root directory of your project\n"
            "\tand cannot be an empty string.\n")
        sys.exit(1)
    options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
if options.filter is None:
    options.filter = re.compile('')
#
for i in range(0,len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')
#
# Get data files
#
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
for file in datafiles:
    process_datafile(file,covdata,options)
if options.verbose:
    sys.stdout.write("Gathered coverage data for "+str(len(covdata))+" files\n")
#
# Print report
#
if options.xml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)