source: gcovr/trunk/scripts/gcovr @ 2726

Revision 2726, 29.3 KB, checked in by wehart on 2012-01-06

Misc portability fix.

  • Property svn:executable set to *
  • Property svn:keywords set to Date Revision
#! /usr/bin/env python
#
# A report generator for gcov 3.4
#
# This script generates a report in a format similar to the one produced
# by the Python coverage.py module.  The data processing is similar to
# that performed by lcov's geninfo command.  However, we don't worry
# about parsing the *.gcno files, and backwards compatibility with
# older versions of gcov is not supported.
#
# Outstanding issues
#   - verify that gcov 3.4 or newer is being used
#   - verify support for symbolic links
#
# gcovr is a FAST project.  For documentation, bug reporting, and
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
#
# _________________________________________________________________________
#
# FAST: Utilities for Agile Software Development
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# For more information, see the FAST README.txt file.
#
# $Revision 2726 $
# $Date 2012-01-06 19:30:48 -0700 (Fri, 06 Jan 2012) $
# _________________________________________________________________________
#

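# Example invocations (illustrative only; see the option definitions below
# for the authoritative list of flags):
#
#   gcovr -r .                    # text summary for the current source tree
#   gcovr -r . --xml -o cov.xml   # Cobertura-style XML report
#   gcovr -r . -b                 # tabulate branch coverage instead of lines
#
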
import copy
import glob
import os
import re
import subprocess
import sys
import time
import xml.dom.minidom

from optparse import OptionParser
from string import Template
from os.path import normpath

__version__ = "2.4-prerelease"
gcov_cmd = "gcov"

output_re = re.compile("creating [`'](.*)'$")
source_re = re.compile("cannot open (source|graph) file")

#
# Container object for coverage statistics
#
class CoverageData(object):

    def __init__(self, fname, uncovered, covered, branches, noncode):
        self.fname=fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.covered   = copy.copy(covered)
        self.noncode   = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(list(covered.keys()))
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, covered, branches, noncode):
        self.all_lines.update(uncovered)
        self.all_lines.update(list(covered.keys()))
        self.uncovered.update(uncovered)
        self.noncode.intersection_update(noncode)
        for k in list(covered.keys()):
            self.covered[k] = self.covered.get(k,0) + covered[k]
        for k in list(branches.keys()):
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(list(self.covered.keys()))

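    # uncovered_str() renders the set of uncovered lines as a compact string.
    # With --branches it simply lists every line that still has an unexecuted
    # branch; otherwise consecutive uncovered line numbers are collapsed into
    # ranges (e.g. an uncovered set such as {3,4,5,9} is reported as "3-5,9"),
    # and gaps that consist only of non-code lines do not break a range.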
    def uncovered_str(self):
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in list(self.branches.keys()):
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break

            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""

        tmp.sort()
        first = None
        last = None
        ranges=[]
        for item in tmp:
            if last is None:
                first=item
                last=item
            elif item == (last+1):
                last=item
            else:
                if len(self.noncode.intersection(list(range(last+1,item)))) \
                       == item - last - 1:
                    last = item
                    continue

                if first==last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first)+"-"+str(last))
                first=item
                last=item
        if first==last:
            ranges.append(str(first))
        else:
            ranges.append(str(first)+"-"+str(last))
        return ",".join(ranges)

    def coverage(self):
        if ( options.show_branch ):
            total = 0
            cover = 0
            for line in list(self.branches.keys()):
                for branch in list(self.branches[line].keys()):
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0*cover/total)) or "--"
        return (total, cover, percent)

    def summary(self):
        tmp = options.filter.sub('',self.fname)
        if not self.fname.endswith(tmp):
            # Don't truncate if the filter does not match at the
            # beginning of the string
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp=tmp+"\n"+" "*40

        (total, cover, percent) = self.coverage()
        return ( total, cover,
                 tmp + str(total).rjust(8) + str(cover).rjust(8) + \
                 percent.rjust(6) + "%   " + self.uncovered_str() )


def search_file(expr, path=None, abspath=False, follow_links=False):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.

    Options:
       path - The directory that is searched recursively
       abspath - Return absolute paths (default=False)
       follow_links - Resolve symbolic links and return the target's
           absolute path (default=False)
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '"+path+"'")
    for root, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root,name)
                if follow_links and os.path.islink(name):
                    ans.append( os.path.abspath(os.readlink(name)) )
                elif abspath:
                    ans.append( os.path.abspath(name) )
                else:
                    ans.append( name )
    return ans


#
# Get the list of datafiles in the directories specified by the user
#
def get_datafiles(flist, options, ext="gcda"):
    allfiles=[]
    for dir in flist:
        if options.verbose:
            sys.stdout.write("Scanning directory "+dir+" for "+ext+" files...\n")
        files = search_file(".*\."+ext, dir, abspath=True, follow_links=True)
        if options.verbose:
            sys.stdout.write("Found %d files \n" % len(files))
        allfiles += files
    return allfiles


def process_gcov_data(file, covdata, options):
    INPUT = open(file,"r")
    #
    # Get the filename
    #
    line = INPUT.readline()
    segments=line.split(":")
    fname = (segments[-1]).strip()
    if fname[0] != os.sep:
        #line = INPUT.readline()
        #segments=line.split(":")
        #fname = os.path.dirname((segments[-1]).strip())+os.sep+fname
        fname = os.path.abspath(fname)
    if options.verbose:
        sys.stdout.write("Parsing coverage data for file %s\n" % fname)
    #
    # Return if the filename does not match the filter
    #
    if options.filter is not None and not options.filter.match(fname):
        if options.verbose:
            sys.stdout.write("  Filtering coverage data for file %s\n" % fname)
        return
    #
    # Return if the filename matches the exclude pattern
    #
    for i in range(0,len(options.exclude)):
        if options.exclude[i].match(options.filter.sub('',fname)) or \
               options.exclude[i].match(fname) or \
               options.exclude[i].match(os.path.abspath(fname)):
            if options.verbose:
                sys.stdout.write("  Excluding coverage data for file %s\n" % fname)
            return
    #
    # Parse each line, and record the lines
    # that are uncovered
    #
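    # The loop below assumes the standard gcov annotated-source layout,
    # roughly (illustrative):
    #
    #         -:   10:// non-executable line
    #         6:   11:    total += x;
    #     #####:   12:    unreachable();
    #   branch  0 taken 4
    #   branch  1 never executed
    #
    # i.e. "<count or marker>:<line number>:<source text>", plus branch/call/
    # function summary lines that begin with a keyword instead of a count.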
    noncode   = set()
    uncovered = set()
    covered   = {}
    branches  = {}
    #first_record=True
    lineno = 0
    for line in INPUT:
        segments=line.split(":")
        tmp = segments[0].strip()
        try:
            lineno = int(segments[1].strip())
        except:
            pass # keep previous line number!

        if tmp[0] == '#':
            uncovered.add( lineno )
        elif tmp[0] in "0123456789":
            covered[lineno] = int(segments[0].strip())
        elif tmp[0] == '-':
            # remember certain non-executed lines
            code = segments[2].strip()
            if len(code) == 0 or code == "{" or code == "}" or \
               code.startswith("//") or code == 'else':
                noncode.add( lineno )
        elif tmp.startswith('branch'):
            fields = line.split()
            try:
                count = int(fields[3])
                branches.setdefault(lineno, {})[int(fields[1])] = count
            except:
                # We ignore branches that were "never executed"
                pass
        elif tmp.startswith('call'):
            pass
        elif tmp.startswith('function'):
            pass
        elif tmp[0] == 'f':
            pass
            #if first_record:
                #first_record=False
                #uncovered.add(prev)
            #if prev in uncovered:
                #tokens=re.split('[ \t]+',tmp)
                #if tokens[3] != "0":
                    #uncovered.remove(prev)
            #prev = int(segments[1].strip())
            #first_record=True
        else:
            sys.stdout.write("UNKNOWN LINE DATA: %s\n" % tmp)
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here.  Otherwise,
    # initialize covdata
    #
    if not fname in covdata:
        covdata[fname] = CoverageData(fname,uncovered,covered,branches,noncode)
    else:
        covdata[fname].update(uncovered,covered,branches,noncode)
    INPUT.close()

#
# Process a datafile (generated by running the instrumented application)
# and run gcov with the corresponding arguments
#
# This is trickier than it sounds: The gcda/gcno files are stored in the
# same directory as the object files; however, gcov must be run from the
# same directory where gcc/g++ was run.  Normally, the user would know
# where gcc/g++ was invoked from and could tell gcov the path to the
# object (and gcda) files with the --object-directory option.
# Unfortunately, we do everything backwards: gcovr looks for the gcda
# files and then has to infer the original gcc working directory.
#
# In general (but not always), we can assume that the gcda file is in a
# subdirectory of the original gcc working directory, so we will first
# try ".", and on error, move up the directory tree looking for the
# correct working directory (letting gcov's own error codes dictate when
# we hit the right directory).  This covers 90+% of the "normal" cases.
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
# the object directory was a peer, not a parent/child, of the cwd).  In
# this case, things are really tough.  We accept an argument
# (--object-directory) that SHOULD BE THE SAME as the one provided to
# gcc.  We will then walk that path (backwards) in the hopes of
# identifying the original gcc working directory (there is a bit of
# trial-and-error here).
#
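# Illustrative example (hypothetical paths): for a data file
# /work/proj/build/obj/foo.gcda, gcov is first attempted from
# /work/proj/build/obj, then from /work/proj/build, /work/proj, and so on
# up the tree, until gcov stops complaining that it cannot open the
# source/graph files.
#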
def process_datafile(filename, covdata, options):
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname,fname) = os.path.split(abs_filename)
    #(name,ext) = os.path.splitext(base)

    potential_wd = []
    starting_dir = os.getcwd()
    errors=[]
    Done = False

    if options.objdir:
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1*idx] != src_components[-1*idx]:
                break
            idx += 1
        if idx > len(components):
            pass # a parent dir; the normal process will find it
        elif components[-1*idx] == '..':
            # rebuild the path prefix (os.path.join cannot join a list)
            dirs = [ os.sep.join(src_components[:len(src_components)-idx+1]) ]
            while idx <= len(components) and components[-1*idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d,f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [ options.objdir ]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [ os.path.join(x, options.objdir) for x in
                        [os.path.dirname(abs_filename), os.getcwd()] ]
            potential_wd = [ testdir for testdir in tmp
                             if os.path.isdir(testdir) ]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Revert to the normal directory-walk process below
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [ gcov_cmd, abs_filename,
            "--branch-counts", "--branch-probabilities", "--preserve-paths",
            '--object-directory', dirname ]

    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entries are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0))


        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            sys.stdout.write("Running gcov: '%s' in '%s'\n" % ( ' '.join(cmd), os.getcwd() ))
        (out, err) = subprocess.Popen( cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE ).communicate()
        out=str(out)

        # find the files that gcov created
        gcov_files = []
        for line in out.split(os.linesep):
            found = output_re.search(line)
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
                    continue
                exclude=False
                for i in range(0,len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
                           options.gcov_exclude[i].match(fname) or \
                           options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude=True
                        break
                if not exclude:
                    gcov_files.append(fname)
                elif options.verbose:
                    sys.stdout.write("Excluding gcov file %s\n" % fname)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files:
                process_gcov_data(fname, covdata, options)
            Done = True

        if not options.keep:
            for fname in gcov_files:
                os.remove(fname)

    os.chdir(starting_dir)
    if options.delete:
        os.remove(abs_filename)

    if not Done:
        sys.stdout.write("GCOV produced the following errors processing %s:\n   %s" \
              "(gcovr could not infer a working directory " \
              "that resolved it.)\n" % ( filename, "   ".join(errors) ))

#
# Produce the classic gcovr text report
#
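# The report is a fixed-width table, roughly (illustrative values):
#
#   File                                       Lines    Exec  Cover   Missing
#   ------------------------------------------------------------------------
#   src/foo.c                                     20      15    75%   8,12-15
#   ------------------------------------------------------------------------
#   TOTAL                                         20      15    75%
#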
def print_text_report(covdata):
    def _num_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        return total - covered
    def _percent_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        if covered:
            return -1.0*covered/total
        else:
            return total or 1e6
    def _alpha(key):
        return key

    if options.output:
        OUTPUT = open(options.output,'w')
    else:
        OUTPUT = sys.stdout
    total_lines=0
    total_covered=0
    # Header
    OUTPUT.write("-"*78 + '\n')
    a = options.show_branch and "Branch" or "Lines"
    b = options.show_branch and "Taken" or "Exec"
    OUTPUT.write("File".ljust(40) + a.rjust(8) + b.rjust(8)+ "  Cover   Missing\n")
    OUTPUT.write("-"*78 + '\n')

    # Data
    keys = list(covdata.keys())
    keys.sort(key=options.sort_uncovered and _num_uncovered or \
              options.sort_percent and _percent_uncovered or _alpha)
    for key in keys:
        (t, n, txt) = covdata[key].summary()
        total_lines += t
        total_covered += n
        OUTPUT.write(txt + '\n')

    # Footer & summary
    OUTPUT.write("-"*78 + '\n')
    percent = total_lines and str(int(100.0*total_covered/total_lines)) or "--"
    OUTPUT.write("TOTAL".ljust(40) + str(total_lines).rjust(8) + \
          str(total_covered).rjust(8) + str(percent).rjust(6)+"%" + '\n')
    OUTPUT.write("-"*78 + '\n')

    # Close logfile
    if options.output:
        OUTPUT.close()

#
# Produce an XML report in the Cobertura format
#
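# The emitted document is a Cobertura-style XML tree, roughly (illustrative):
#
#   <coverage line-rate="..." branch-rate="..." timestamp="..." version="gcovr ...">
#     <sources><source>...</source></sources>
#     <packages>
#       <package name="..." line-rate="..." branch-rate="..." complexity="0.0">
#         <classes>
#           <class name="..." filename="..." line-rate="..." branch-rate="...">
#             <lines>
#               <line number="..." hits="..." branch="..."/>
#             </lines>
#           </class>
#         </classes>
#       </package>
#     </packages>
#   </coverage>
#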
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in list(covdata.keys()):
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in list(covdata.keys()):
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
                       str(float(lineCovered) / lineTotal) )
    root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
                       str(float(branchCovered) / branchTotal) )
    root.setAttribute( "timestamp", str(int(time.time())) )
    root.setAttribute( "version", "gcovr "+__version__ )

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = list(covdata.keys())
    keys.sort()
    for f in keys:
        data = covdata[f]
        dir = options.filter.sub('',f)
        if f.endswith(dir):
            src_path = f[:-1*len(dir)]
            if len(src_path) > 0:
                while dir.startswith(os.path.sep):
                    src_path += os.path.sep
                    dir = dir[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Don't truncate if the filter does not match at the
            # beginning of the string
            dir = f
        (dir, fname) = os.path.split(dir)

        package = packages.setdefault(
            dir, [ doc.createElement("package"), {},
                   0, 0, 0, 0 ] )
        c = doc.createElement("class")
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in list(branches.values()):
                    if v > 0:
                        b_hits += 1
                coverage = 100*b_hits/len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute( "condition-coverage",
                                "%i%% (%i/%i)" %
                                (coverage, b_hits, len(branches)) )
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % ( coverage ) )
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)

            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(dir, fname))
        c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
        c.setAttribute( "branch-rate",
                        str(class_branch_hits / (1.0*class_branches or 1.0)) )
        c.setAttribute("complexity", "0.0")

        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    for packageName, packageData in list(packages.items()):
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = list(packageData[1].keys())
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
        package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
        package.setAttribute("complexity", "0.0")


    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
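    #
    # Illustrative example (hypothetical paths): with a cwd of
    # /home/user/build, a source dir of /home/user/build/src is reduced to a
    # path relative to the cwd ("src"), a source dir of /home/user is
    # expressed by walking up the tree (".."), and an unrelated directory is
    # emitted verbatim.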
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
            elif cwd.startswith(d):
                i = 1
                while normpath(d) != \
                          normpath(os.path.join(*tuple([cwd]+['..']*i))):
                    i += 1
                reldir = os.path.join(*tuple(['..']*i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

    xmlString = doc.toprettyxml()
    #xml.dom.ext.PrettyPrint(doc)
    if options.output is None:
        sys.stdout.write(xmlString+'\n')
    else:
        OUTPUT = open(options.output, 'w')
        OUTPUT.write(xmlString +'\n')
        OUTPUT.close()


##
## MAIN
##

#
# Create option parser
#
parser = OptionParser()
parser.add_option("--version",
        help="Print the version number, then exit",
        action="store_true",
        dest="version",
        default=False)
parser.add_option("-v","--verbose",
        help="Print progress messages",
        action="store_true",
        dest="verbose",
        default=False)
parser.add_option('--object-directory',
        help="Specify the directory that contains the gcov data files.  gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run.  Normally, gcovr can guess correctly.  This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
        action="store",
        dest="objdir",
        default=None)
parser.add_option("-o","--output",
        help="Print output to this filename",
        action="store",
        dest="output",
        default=None)
parser.add_option("-k","--keep",
        help="Keep temporary gcov files",
        action="store_true",
        dest="keep",
        default=False)
parser.add_option("-d","--delete",
        help="Delete the coverage files after they are processed",
        action="store_true",
        dest="delete",
        default=False)
parser.add_option("-f","--filter",
        help="Keep only the data files that match this regular expression",
        action="store",
        dest="filter",
        default=None)
parser.add_option("-e","--exclude",
        help="Exclude data files that match this regular expression",
        action="append",
        dest="exclude",
        default=[])
parser.add_option("--gcov-filter",
        help="Keep only gcov data files that match this regular expression",
        action="store",
        dest="gcov_filter",
        default=None)
parser.add_option("--gcov-exclude",
        help="Exclude gcov data files that match this regular expression",
        action="append",
        dest="gcov_exclude",
        default=[])
parser.add_option("-r","--root",
        help="Defines the root directory.  This is used to filter the files, and to standardize the output.",
        action="store",
        dest="root",
        default=None)
parser.add_option("-x","--xml",
        help="Generate XML instead of the normal tabular output.",
        action="store_true",
        dest="xml",
        default=None)
parser.add_option("-b","--branches",
        help="Tabulate the branch coverage instead of the line coverage.",
        action="store_true",
        dest="show_branch",
        default=None)
parser.add_option("-u","--sort-uncovered",
        help="Sort entries by increasing number of uncovered lines.",
        action="store_true",
        dest="sort_uncovered",
        default=None)
parser.add_option("-p","--sort-percentage",
        help="Sort entries by decreasing percentage of covered lines.",
        action="store_true",
        dest="sort_percent",
        default=None)
parser.usage="gcovr [options]"
parser.description="A utility to run gcov and generate a simple report that summarizes the coverage"
#
# Process options
#
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    sys.stdout.write("gcovr "+__version__+'\n')
    sys.stdout.write("\n")
    sys.stdout.write("Copyright (2008) Sandia Corporation. Under the terms of Contract \n")
    sys.stdout.write("DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government \n")
    sys.stdout.write("retains certain rights in this software.\n")
    sys.exit(0)
if options.objdir:
    if normpath(options.objdir) != options.objdir.replace('/',os.sep):
        sys.stdout.write("WARNING: relative referencing in --object-directory; this could\n")
        sys.stdout.write("         cause strange errors when gcovr attempts to identify\n")
        sys.stdout.write("         the original gcc working directory.\n")
#
# Setup filters
#
for i in range(0,len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])
if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
if options.filter is None:
    options.filter = re.compile('')
#
for i in range(0,len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')
#
# Get data files
#
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
for file in datafiles:
    process_datafile(file,covdata,options)
if options.verbose:
    sys.stdout.write("Gathered coverage data for "+str(len(covdata))+" files\n")
#
# Print report
#
if options.xml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)