source: gcovr/trunk/scripts/gcovr @ 2716

Revision 2716, 29.2 KB checked in by wehart, 2 years ago

Tagging gcovr 2.3

  • Property svn:executable set to *
  • Property svn:keywords set to Date Revision
#! /usr/bin/env python
#
# A report generator for gcov 3.4
#
# This routine generates a format that is similar to the format generated
# by the Python coverage.py module.  This code is similar to the
# data processing performed by lcov's geninfo command.  However, we
# don't worry about parsing the *.gcno files, and backwards compatibility
# for older versions of gcov is not supported.
#
# Outstanding issues
#   - verify that gcov 3.4 or newer is being used
#   - verify support for symbolic links
#
# gcovr is a FAST project.  For documentation, bug reporting, and
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
#
# _________________________________________________________________________
#
# FAST: Utilities for Agile Software Development
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# For more information, see the FAST README.txt file.
#
# $Revision 2716 $
# $Date 2011-12-11 16:53:17 -0700 (Sun, 11 Dec 2011) $
# _________________________________________________________________________
#

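# Example invocations (illustrative only; the full set of flags is defined
# in the option parser further below):
#
#   gcovr -r .                     # text summary for gcda files under the cwd
#   gcovr -r . --xml -o cov.xml    # Cobertura-style XML report written to cov.xml
#   gcovr -v -b obj/               # verbose run, branch coverage, scanning ./obj
#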
import copy
import glob
import os
import re
import subprocess
import sys
import time
import xml.dom.minidom

from optparse import OptionParser
from string import Template
from os.path import normpath

__version__ = "2.4-prerelease"
gcov_cmd = "gcov"

output_re = re.compile("creating [`'](.*)'$")
source_re = re.compile("cannot open (source|graph) file")

#
# Container object for coverage statistics
#
class CoverageData(object):

    def __init__(self, fname, uncovered, covered, branches, noncode):
        self.fname=fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.covered   = copy.copy(covered)
        self.noncode   = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(covered.keys())
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, covered, branches, noncode):
        self.all_lines.update(uncovered)
        self.all_lines.update(covered.keys())
        self.uncovered.update(uncovered)
        self.noncode.intersection_update(noncode)
        for k in covered.keys():
            self.covered[k] = self.covered.get(k,0) + covered[k]
        for k in branches.keys():
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(self.covered.keys())

    def uncovered_str(self):
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in self.branches.keys():
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break

            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""

        tmp.sort()
        first = None
        last = None
        ranges=[]
        for item in tmp:
            #print "HERE",item
            if last is None:
                first=item
                last=item
            elif item == (last+1):
                last=item
            else:
                if len(self.noncode.intersection(range(last+1,item))) \
                       == item - last - 1:
                    last = item
                    continue

                if first==last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first)+"-"+str(last))
                first=item
                last=item
        if first==last:
            ranges.append(str(first))
        else:
            ranges.append(str(first)+"-"+str(last))
        return ",".join(ranges)

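    # Illustration of the aggregation above: with uncovered lines {3,4,5,9}
    # and no non-code lines in between, uncovered_str() yields "3-5,9"; if
    # lines 6-8 are all non-code (blanks, braces, comments), the gap is
    # bridged and the result is "3-9".  (Hypothetical line numbers, for
    # exposition only.)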
    def coverage(self):
        if ( options.show_branch ):
            total = 0
            cover = 0
            for line in self.branches.keys():
                for branch in self.branches[line].keys():
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0*cover/total)) or "--"
        return (total, cover, percent)

    def summary(self):
        tmp = options.filter.sub('',self.fname)
        if not self.fname.endswith(tmp):
            # Don't truncate if the filter does not match at the
            # beginning of the string
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp=tmp+"\n"+" "*40

        (total, cover, percent) = self.coverage()
        return ( total, cover,
                 tmp + str(total).rjust(8) + str(cover).rjust(8) + \
                 percent.rjust(6) + "%   " + self.uncovered_str() )


def search_file(expr, path=None, abspath=False, follow_links=False):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.

    Can specify the following options:
       path - The directory that is searched recursively
       abspath - Return absolute paths (default=False)
       follow_links - Resolve symbolic links and return the absolute
           path of their targets (default=False)
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError, "Unknown directory '"+path+"'"
    for root, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root,name)
                if follow_links and os.path.islink(name):
                    ans.append( os.path.abspath(os.readlink(name)) )
                elif abspath:
                    ans.append( os.path.abspath(name) )
                else:
                    ans.append( name )
    return ans


#
# Get the list of datafiles in the directories specified by the user
#
def get_datafiles(flist, options, ext="gcda"):
    allfiles=[]
    for dir in flist:
        if options.verbose:
            print "Scanning directory "+dir+" for "+ext+" files..."
        files = search_file(".*\."+ext, dir, abspath=True, follow_links=True)
        if options.verbose:
            print "Found %d files " % len(files)
        allfiles += files
    return allfiles


def process_gcov_data(file, covdata, options):
    INPUT = open(file,"r")
    #
    # Get the filename
    #
    line = INPUT.readline()
    segments=line.split(":")
    fname = (segments[-1]).strip()
    if fname[0] != os.sep:
        #line = INPUT.readline()
        #segments=line.split(":")
        #fname = os.path.dirname((segments[-1]).strip())+os.sep+fname
        fname = os.path.abspath(fname)
    if options.verbose:
        print "Parsing coverage data for file "+fname
    #
    # Return if the filename does not match the filter
    #
    if options.filter is not None and not options.filter.match(fname):
        if options.verbose:
            print "  Filtering coverage data for file "+fname
        return
    #
    # Return if the filename matches the exclude pattern
    #
    for i in range(0,len(options.exclude)):
        if options.exclude[i].match(options.filter.sub('',fname)) or \
               options.exclude[i].match(fname) or \
               options.exclude[i].match(os.path.abspath(fname)):
            if options.verbose:
                print "  Excluding coverage data for file "+fname
            return
    #
    # Parse each line, and record the lines
    # that are uncovered
    #
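    # Each *.gcov line has the rough form "<count>:<lineno>:<source text>",
    # for example:
    #     "    #####:   10:    foo();"    -- executable line, never run
    #     "        7:   11:    bar();"    -- executable line, run 7 times
    #     "        -:   12:    }"         -- non-executable line
    # with records such as "branch  0 taken 4" interleaved when branch
    # probabilities are requested.  (Example lines are illustrative; see the
    # gcov documentation for the exact format.)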
    noncode   = set()
    uncovered = set()
    covered   = {}
    branches  = {}
    #first_record=True
    lineno = 0
    for line in INPUT:
        segments=line.split(":")
        tmp = segments[0].strip()
        try:
            lineno = int(segments[1].strip())
        except:
            pass # keep previous line number!

        if tmp[0] == '#':
            uncovered.add( lineno )
        elif tmp[0] in "0123456789":
            covered[lineno] = int(segments[0].strip())
        elif tmp[0] == '-':
            # remember certain non-executed lines
            code = segments[2].strip()
            if len(code) == 0 or code == "{" or code == "}" or \
               code.startswith("//") or code == 'else':
                noncode.add( lineno )
        elif tmp.startswith('branch'):
            fields = line.split()
            try:
                count = int(fields[3])
                branches.setdefault(lineno, {})[int(fields[1])] = count
            except:
                # We ignore branches that were "never executed"
                pass
        elif tmp.startswith('call'):
            pass
        elif tmp.startswith('function'):
            pass
        elif tmp[0] == 'f':
            pass
            #if first_record:
                #first_record=False
                #uncovered.add(prev)
            #if prev in uncovered:
                #tokens=re.split('[ \t]+',tmp)
                #if tokens[3] != "0":
                    #uncovered.remove(prev)
            #prev = int(segments[1].strip())
            #first_record=True
        else:
            print "UNKNOWN LINE DATA:",tmp
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here.  Otherwise,
    # initialize covdata
    #
    #print "HERE",fname
    #print "HERE uncovered",uncovered
    #print "HERE   covered",covered
    if not fname in covdata:
        covdata[fname] = CoverageData(fname,uncovered,covered,branches,noncode)
    else:
        #print "HERE B uncovered",covdata[fname].uncovered
        #print "HERE B   covered",covdata[fname].covered
        covdata[fname].update(uncovered,covered,branches,noncode)
        #print "HERE A uncovered",covdata[fname].uncovered
        #print "HERE A   covered",covdata[fname].covered
    INPUT.close()

#
# Process a datafile (generated by running the instrumented application)
# and run gcov with the corresponding arguments
#
# This is trickier than it sounds: The gcda/gcno files are stored in the
# same directory as the object files; however, gcov must be run from the
# same directory where gcc/g++ was run.  Normally, the user would know
# where gcc/g++ was invoked from and could tell gcov the path to the
# object (and gcda) files with the --object-directory option.
# Unfortunately, we do everything backwards: gcovr looks for the gcda
# files and then has to infer the original gcc working directory.
#
# In general (but not always), we can assume that the gcda file is in a
# subdirectory of the original gcc working directory, so we will first
# try ".", and on error, move up the directory tree looking for the
# correct working directory (letting gcov's own error codes dictate when
# we hit the right directory).  This covers 90+% of the "normal" cases.
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
# the object directory was a peer, not a parent/child, of the cwd).  In
# this case, things are really tough.  We accept an argument
# (--object-directory) that SHOULD BE THE SAME as the one provided to
# gcc.  We will then walk that path (backwards) in the hopes of
# identifying the original gcc working directory (there is a bit of
# trial-and-error here).
#
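# A concrete (illustrative) example: if gcc was run from /work/build with
# "-o obj/foo.o", the data file ends up at /work/build/obj/foo.gcda.  gcovr
# finds that file, invokes gcov with --object-directory pointing at the gcda
# directory, and tries running it first from /work/build/obj, then from
# /work/build, /work, and so on up the tree until gcov stops complaining
# about missing source/graph files.
#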
def process_datafile(filename, covdata, options):
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname,fname) = os.path.split(abs_filename)
    #(name,ext) = os.path.splitext(base)

    potential_wd = []
    starting_dir = os.getcwd()
    errors=[]
    Done = False

    if options.objdir:
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1*idx] != src_components[-1*idx]:
                break
            idx += 1
        if idx > len(components):
            pass # a parent dir; the normal process will find it
        elif components[-1*idx] == '..':
            # join the leading path components back into a single path string
            dirs = [ os.sep.join(src_components[:len(src_components)-idx+1]) ]
            while idx <= len(components) and components[-1*idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d,f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [ options.objdir ]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [ os.path.join(x, options.objdir) for x in
                        [os.path.dirname(abs_filename), os.getcwd()] ]
            potential_wd = [ testdir for testdir in tmp
                             if os.path.isdir(testdir) ]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Revert to the normal process below
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [ gcov_cmd, abs_filename,
            "--branch-counts", "--branch-probabilities", "--preserve-paths",
            '--object-directory', dirname ]

    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entries are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0))

        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            print "Running gcov: '%s' in '%s'" % ( ' '.join(cmd), os.getcwd() )
        (out, err) = subprocess.Popen( cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE ).communicate()

        # find the files that gcov created
        gcov_files = []
        for line in out.split(os.linesep):
            found = output_re.search(line)
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        print "Filtering gcov file",fname
                    continue
                exclude=False
                for i in range(0,len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
                           options.gcov_exclude[i].match(fname) or \
                           options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude=True
                        break
                if not exclude:
                    gcov_files.append(fname)
                elif options.verbose:
                    print "Excluding gcov file",fname
        #print "Output files\n" + "\n".join(gcov_files)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files:
                process_gcov_data(fname, covdata, options)
            Done = True

        if not options.keep:
            for fname in gcov_files:
                os.remove(fname)

    os.chdir(starting_dir)
    if options.delete:
        os.remove(abs_filename)

    if not Done:
        print "GCOV produced the following errors processing %s:\n   %s" \
              "(gcovr could not infer a working directory " \
              "that resolved it.)" % ( filename, "   ".join(errors) )

#
# Produce the classic gcovr text report
#
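# The report is a fixed-width table, roughly of this shape (illustrative):
#
#   File                                       Lines    Exec  Cover   Missing
#   --------------------------------------------------------------------------
#   src/foo.c                                     20      15    75%   4,9-12
#   TOTAL                                         20      15    75%
#
# With --branches the Lines/Exec columns become Branch/Taken.
#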
def print_text_report(covdata):
    def _num_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        return total - covered
    def _percent_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        if covered:
            return -1.0*covered/total
        else:
            return total or 1e6
    def _alpha(key):
        return key

    if options.output:
        OUTPUT = open(options.output,'w')
    else:
        OUTPUT = sys.stdout
    total_lines=0
    total_covered=0
    # Header
    print >>OUTPUT, "-"*78
    a = options.show_branch and "Branch" or "Lines"
    b = options.show_branch and "Taken" or "Exec"
    print >>OUTPUT, "File".ljust(40) + a.rjust(8) + b.rjust(8)+ "  Cover   Missing"
    print >>OUTPUT, "-"*78

    # Data
    keys = covdata.keys()
    keys.sort(key=options.sort_uncovered and _num_uncovered or \
              options.sort_percent and _percent_uncovered or _alpha)
    for key in keys:
        (t, n, txt) = covdata[key].summary()
        total_lines += t
        total_covered += n
        print >>OUTPUT, txt

    # Footer & summary
    print >>OUTPUT, "-"*78
    percent = total_lines and str(int(100.0*total_covered/total_lines)) or "--"
    print >>OUTPUT, "TOTAL".ljust(40) + str(total_lines).rjust(8) + \
          str(total_covered).rjust(8) + str(percent).rjust(6)+"%"
    print >>OUTPUT, "-"*78

    # Close logfile
    if options.output:
        OUTPUT.close()

#
# Produce an XML report in the Cobertura format
#
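# The emitted document follows the Cobertura coverage-03 DTD; its overall
# shape (attribute values abbreviated) is roughly:
#
#   <coverage line-rate=".." branch-rate=".." timestamp=".." version="..">
#     <sources> <source>...</source> </sources>
#     <packages>
#       <package name=".." line-rate=".." branch-rate=".." complexity="0.0">
#         <classes>
#           <class name=".." filename=".." line-rate=".." branch-rate="..">
#             <lines>
#               <line number=".." hits=".." branch="true|false" .../>
#             </lines>
#           </class>
#         </classes>
#       </package>
#     </packages>
#   </coverage>
#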
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
                       str(float(lineCovered) / lineTotal) )
    root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
                       str(float(branchCovered) / branchTotal) )
    root.setAttribute( "timestamp", str(int(time.time())) )
    root.setAttribute( "version", "gcovr "+__version__ )

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = covdata.keys()
    keys.sort()
    for f in keys:
        data = covdata[f]
        dir = options.filter.sub('',f)
        if f.endswith(dir):
            src_path = f[:-1*len(dir)]
            if len(src_path) > 0:
                while dir.startswith(os.path.sep):
                    src_path += os.path.sep
                    dir = dir[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Don't truncate if the filter does not match at the
            # beginning of the string
            dir = f
        (dir, fname) = os.path.split(dir)

        package = packages.setdefault(
            dir, [ doc.createElement("package"), {},
                   0, 0, 0, 0 ] )
        c = doc.createElement("class")
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in branches.values():
                    if v > 0:
                        b_hits += 1
                coverage = 100*b_hits/len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute( "condition-coverage",
                                "%i%% (%i/%i)" %
                                (coverage, b_hits, len(branches)) )
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % ( coverage ) )
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)

            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(dir, fname))
        c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
        c.setAttribute( "branch-rate",
                        str(class_branch_hits / (1.0*class_branches or 1.0)) )
        c.setAttribute("complexity", "0.0")

        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    for packageName, packageData in packages.items():
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = packageData[1].keys()
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
        package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
        package.setAttribute("complexity", "0.0")


    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
            elif cwd.startswith(d):
                i = 1
                #print d
                #print os.path.join(*tuple([cwd]+['..']*i))
                while normpath(d) != \
                          normpath(os.path.join(*tuple([cwd]+['..']*i))):
                    i += 1
                reldir = os.path.join(*tuple(['..']*i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

    xmlString = doc.toprettyxml()
    #xml.dom.ext.PrettyPrint(doc)
    if options.output is None:
        print xmlString
    else:
        OUTPUT = open(options.output, 'w')
        print >>OUTPUT, xmlString
        OUTPUT.close()


##
## MAIN
##

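#
# Overall flow: parse the command line, compile the filter/exclude regular
# expressions, locate the *.gcda files in the requested directories, run gcov
# on each one to accumulate per-file CoverageData, and then emit either the
# text or the XML report.
#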
#
# Create option parser
#
parser = OptionParser()
parser.add_option("--version",
        help="Print the version number, then exit",
        action="store_true",
        dest="version",
        default=False)
parser.add_option("-v","--verbose",
        help="Print progress messages",
        action="store_true",
        dest="verbose",
        default=False)
parser.add_option('--object-directory',
        help="Specify the directory that contains the gcov data files.  gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run.  Normally, gcovr can guess correctly.  This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
        action="store",
        dest="objdir",
        default=None)
parser.add_option("-o","--output",
        help="Print output to this filename",
        action="store",
        dest="output",
        default=None)
parser.add_option("-k","--keep",
        help="Keep temporary gcov files",
        action="store_true",
        dest="keep",
        default=False)
parser.add_option("-d","--delete",
        help="Delete the coverage files after they are processed",
        action="store_true",
        dest="delete",
        default=False)
parser.add_option("-f","--filter",
        help="Keep only the data files that match this regular expression",
        action="store",
        dest="filter",
        default=None)
parser.add_option("-e","--exclude",
        help="Exclude data files that match this regular expression",
        action="append",
        dest="exclude",
        default=[])
parser.add_option("--gcov-filter",
        help="Keep only gcov data files that match this regular expression",
        action="store",
        dest="gcov_filter",
        default=None)
parser.add_option("--gcov-exclude",
        help="Exclude gcov data files that match this regular expression",
        action="append",
        dest="gcov_exclude",
        default=[])
parser.add_option("-r","--root",
        help="Defines the root directory.  This is used to filter the files, and to standardize the output.",
        action="store",
        dest="root",
        default=None)
parser.add_option("-x","--xml",
        help="Generate XML instead of the normal tabular output.",
        action="store_true",
        dest="xml",
        default=None)
parser.add_option("-b","--branches",
        help="Tabulate the branch coverage instead of the line coverage.",
        action="store_true",
        dest="show_branch",
        default=None)
parser.add_option("-u","--sort-uncovered",
        help="Sort entries by increasing number of uncovered lines.",
        action="store_true",
        dest="sort_uncovered",
        default=None)
parser.add_option("-p","--sort-percentage",
        help="Sort entries by decreasing percentage of covered lines.",
        action="store_true",
        dest="sort_percent",
        default=None)
parser.usage="gcovr [options]"
parser.description="A utility to run gcov and generate a simple report that summarizes the coverage"
#
# Process options
#
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    print "gcovr "+__version__
    print ""
    print "Copyright (2008) Sandia Corporation. Under the terms of Contract "
    print "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government "
    print "retains certain rights in this software."
    sys.exit(0)
if options.objdir:
    if normpath(options.objdir) != options.objdir.replace('/',os.sep):
        print "WARNING: relative referencing in --object-directory; this could"
        print "         cause strange errors when gcovr attempts to identify"
        print "         the original gcc working directory."
#
# Setup filters
#
for i in range(0,len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])
if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
if options.filter is None:
    options.filter = re.compile('')
#
for i in range(0,len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')
#
# Get data files
#
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
for file in datafiles:
    process_datafile(file,covdata,options)
if options.verbose:
    print "Gathered coverage data for "+str(len(covdata))+" files"
#
# Print report
#
if options.xml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)