source: gcovr/trunk/scripts/gcovr @ 2754

Revision 2754, 29.7 KB checked in by jdsiiro, 2 years ago

Normalize all path names to their full absolute path when processing
.gcov files. This should resolve #3921.

  • Property svn:executable set to *
  • Property svn:keywords set to Date Revision
#! /usr/bin/env python
#
# A report generator for gcov 3.4
#
# This routine generates a format that is similar to the format generated
# by the Python coverage.py module.  This code is similar to the
# data processing performed by lcov's geninfo command.  However, we
# don't worry about parsing the *.gcno files, and backwards compatibility for
# older versions of gcov is not supported.
#
# Outstanding issues
#   - verify that gcov 3.4 or newer is being used
#   - verify support for symbolic links
#
# gcovr is a FAST project.  For documentation, bug reporting, and
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
#
# _________________________________________________________________________
#
# FAST: Utilities for Agile Software Development
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# For more information, see the FAST README.txt file.
#
# $Revision: 2754 $
# $Date: 2012-04-05 10:58:52 -0700 (Thu, 05 Apr 2012) $
# _________________________________________________________________________
#

import copy
import glob
import os
import re
import subprocess
import sys
import time
import xml.dom.minidom

from optparse import OptionParser
from string import Template
from os.path import normpath

__version__ = "2.4-prerelease"
src_revision = "$Revision: 2754 $"
gcov_cmd = "gcov"

output_re = re.compile("[Cc]reating [`'](.*)'$")
source_re = re.compile("cannot open (source|graph) file")

def version_str():
    ans = __version__
    m = re.match(r'\$Revision:\s*(\S+)\s*\$', src_revision)
    if m:
        ans = ans + " (r%s)" % (m.group(1))
    return ans

#
# Container object for coverage statistics
#
class CoverageData(object):

    def __init__(self, fname, uncovered, covered, branches, noncode):
        self.fname=fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.covered   = copy.copy(covered)
        self.noncode   = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(covered.keys())
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, covered, branches, noncode):
        self.all_lines.update(uncovered)
        self.all_lines.update(covered.keys())
        self.uncovered.update(uncovered)
        self.noncode.intersection_update(noncode)
        for k in covered.keys():
            self.covered[k] = self.covered.get(k,0) + covered[k]
        for k in branches.keys():
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(self.covered.keys())

    def uncovered_str(self):
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in self.branches.keys():
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break

            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""

        tmp.sort()
        first = None
        last = None
        ranges=[]
        for item in tmp:
            if last is None:
                first=item
                last=item
            elif item == (last+1):
                last=item
            else:
                if len(self.noncode.intersection(range(last+1,item))) \
                       == item - last - 1:
                    last = item
                    continue

                if first==last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first)+"-"+str(last))
                first=item
                last=item
        if first==last:
            ranges.append(str(first))
        else:
            ranges.append(str(first)+"-"+str(last))
        return ",".join(ranges)

    def coverage(self):
        if ( options.show_branch ):
            total = 0
            cover = 0
            for line in self.branches.keys():
                for branch in self.branches[line].keys():
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0*cover/total)) or "--"
        return (total, cover, percent)

    def summary(self):
        tmp = options.filter.sub('',self.fname)
        if not self.fname.endswith(tmp):
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp=tmp+"\n"+" "*40

        (total, cover, percent) = self.coverage()
        return ( total, cover,
                 tmp + str(total).rjust(8) + str(cover).rjust(8) + \
                 percent.rjust(6) + "%   " + self.uncovered_str() )

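# Illustrative sketch (a hypothetical helper, never called by this script):
# how coverage results from two gcov passes over the same source file are
# merged by CoverageData, per the shallow/deep copy notes in __init__ above.
def _coverage_data_merge_example():
    # First .gcov file: line 3 executed twice, line 5 uncovered, line 4 non-code
    data = CoverageData("example.cpp", uncovered=set([5]), covered={3: 2},
                        branches={}, noncode=set([4]))
    # Second .gcov file (e.g. from another object file): line 5 is now covered
    data.update(uncovered=set(), covered={5: 1}, branches={}, noncode=set([4]))
    # data.covered is now {3: 2, 5: 1} and data.uncovered is empty;
    # uncovered_str() would render any remaining uncovered lines as ranges
    # (e.g. "7-9"), merging across lines recorded in the noncode set.
    return data
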
def search_file(expr, path=None, abspath=False, follow_links=False):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.

    Options:
       path - The directory that is searched recursively (default=cwd)
       abspath - Return absolute paths (default=False)
       follow_links - Resolve symbolic links to their targets (default=False)
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '"+path+"'")
    for root, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root,name)
                if follow_links and os.path.islink(name):
                    ans.append( os.path.abspath(os.readlink(name)) )
                elif abspath:
                    ans.append( os.path.abspath(name) )
                else:
                    ans.append( name )
    return ans


#
# Get the list of datafiles in the directories specified by the user
#
def get_datafiles(flist, options, ext="gcda"):
    allfiles=[]
    for dir in flist:
        if options.verbose:
            sys.stdout.write("Scanning directory "+dir+" for "+ext+" files...\n")
        files = search_file(r".*\."+ext, dir, abspath=True, follow_links=True)
        if options.verbose:
            sys.stdout.write("Found %d files \n" % len(files))
        allfiles += files
    return allfiles


def process_gcov_data(file, covdata, options):
    INPUT = open(file,"r")
    #
    # Get the filename
    #
    line = INPUT.readline()
    segments=line.split(":")
    fname = os.path.abspath((segments[-1]).strip())
    if options.verbose:
        sys.stdout.write("Parsing coverage data for file %s\n" % fname)
    #
    # Return if the filename does not match the filter
    #
    if options.filter is not None and not options.filter.match(fname):
        if options.verbose:
            sys.stdout.write("  Filtering coverage data for file %s\n" % fname)
        return
    #
    # Return if the filename matches the exclude pattern
    #
    for i in range(0,len(options.exclude)):
        if options.exclude[i].match(options.filter.sub('',fname)) or \
               options.exclude[i].match(fname) or \
               options.exclude[i].match(os.path.abspath(fname)):
            if options.verbose:
                sys.stdout.write("  Excluding coverage data for file %s\n" % fname)
            return
    #
    # Parse each line, and record the lines
    # that are uncovered
    #
    noncode   = set()
    uncovered = set()
    covered   = {}
    branches  = {}
    #first_record=True
    lineno = 0
    for line in INPUT:
        segments=line.split(":")
        tmp = segments[0].strip()
        try:
            lineno = int(segments[1].strip())
        except:
            pass # keep previous line number!

        if tmp[0] == '#':
            uncovered.add( lineno )
        elif tmp[0] in "0123456789":
            covered[lineno] = int(segments[0].strip())
        elif tmp[0] == '-':
            # remember certain non-executed lines
            code = segments[2].strip()
            if len(code) == 0 or code == "{" or code == "}" or \
               code.startswith("//") or code == 'else':
                noncode.add( lineno )
        elif tmp.startswith('branch'):
            fields = line.split()
            try:
                count = int(fields[3])
                branches.setdefault(lineno, {})[int(fields[1])] = count
            except:
                # We ignore branches that were "never executed"
                pass
        elif tmp.startswith('call'):
            pass
        elif tmp.startswith('function'):
            pass
        elif tmp[0] == 'f':
            pass
            #if first_record:
                #first_record=False
                #uncovered.add(prev)
            #if prev in uncovered:
                #tokens=re.split('[ \t]+',tmp)
                #if tokens[3] != "0":
                    #uncovered.remove(prev)
            #prev = int(segments[1].strip())
            #first_record=True
        else:
            sys.stdout.write("UNKNOWN LINE DATA: %s\n" % tmp)
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here.  Otherwise,
    # initialize covdata
    #
    if not fname in covdata:
        covdata[fname] = CoverageData(fname,uncovered,covered,branches,noncode)
    else:
        covdata[fname].update(uncovered,covered,branches,noncode)
    INPUT.close()

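# Note on the .gcov text format assumed by process_gcov_data() above (a
# summary for documentation only; the authoritative format is whatever the
# installed gcov emits):
#
#     <execution count> :  <line number> : <source text>
#
# where the count field is "#####" for instrumented but never-executed lines
# and "-" for lines with no executable code, and interleaved records such as
# "branch  N taken M" carry per-branch counts (fields[1] is the branch index,
# fields[3] the taken count, as parsed above).
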
#
# Process a datafile (generated by running the instrumented application)
# and run gcov with the corresponding arguments
#
# This is trickier than it sounds: The gcda/gcno files are stored in the
# same directory as the object files; however, gcov must be run from the
# same directory where gcc/g++ was run.  Normally, the user would know
# where gcc/g++ was invoked from and could tell gcov the path to the
# object (and gcda) files with the --object-directory command.
# Unfortunately, we do everything backwards: gcovr looks for the gcda
# files and then has to infer the original gcc working directory.
#
# In general (but not always) we can assume that the gcda file is in a
# subdirectory of the original gcc working directory, so we will first
# try ".", and on error, move up the directory tree looking for the
# correct working directory (letting gcov's own error codes dictate when
# we hit the right directory).  This covers 90+% of the "normal" cases.
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
# the object directory was a peer (not a parent/child) of the cwd).  In
# this case, things are really tough.  We accept an argument
# (--object-directory) that SHOULD BE THE SAME as the one provided to
# gcc.  We will then walk that path (backwards) in the hopes of
# identifying the original gcc working directory (there is a bit of
# trial-and-error here).  An illustrative sketch of the directory walk
# appears after process_datafile, below.
#
def process_datafile(filename, covdata, options):
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname,fname) = os.path.split(abs_filename)
    #(name,ext) = os.path.splitext(base)

    potential_wd = []
    starting_dir = os.getcwd()
    errors=[]
    Done = False

    if options.objdir:
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1*idx] != src_components[-1*idx]:
                break
            idx += 1
        if idx > len(components):
            pass # a parent dir; the normal process will find it
        elif components[-1*idx] == '..':
            # os.sep.join (not os.path.join): reassemble the already-split
            # path components into a single path string
            dirs = [ os.sep.join(src_components[:len(src_components)-idx+1]) ]
            while idx <= len(components) and components[-1*idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d,f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [ options.objdir ]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [ os.path.join(x, options.objdir) for x in
                        [os.path.dirname(abs_filename), os.getcwd()] ]
            potential_wd = [ testdir for testdir in tmp
                             if os.path.isdir(testdir) ]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Revert to the normal directory-walking process below
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [ gcov_cmd, abs_filename,
            "--branch-counts", "--branch-probabilities", "--preserve-paths",
            '--object-directory', dirname ]

    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entries are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0))


        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            sys.stdout.write("Running gcov: '%s' in '%s'\n" % ( ' '.join(cmd), os.getcwd() ))
        (out, err) = subprocess.Popen( cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE ).communicate()
        out=out.decode('utf-8')
        err=err.decode('utf-8')

        # find the files that gcov created
        gcov_files = {'active':[], 'filter':[], 'exclude':[]}
        for line in out.split(os.linesep):
            found = output_re.search(line)
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
                    gcov_files['filter'].append(fname)
                    continue
                exclude=False
                for i in range(0,len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
                           options.gcov_exclude[i].match(fname) or \
                           options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude=True
                        break
                if not exclude:
                    gcov_files['active'].append(fname)
                else:
                    # track excluded files even when not verbose so that
                    # they are still cleaned up below
                    if options.verbose:
                        sys.stdout.write("Excluding gcov file %s\n" % fname)
                    gcov_files['exclude'].append(fname)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files['active']:
                process_gcov_data(fname, covdata, options)
            Done = True

        if not options.keep:
            for group in gcov_files.values():
                for fname in group:
                    os.remove(fname)

    os.chdir(starting_dir)
    if options.delete:
        os.remove(abs_filename)

    if not Done:
        sys.stdout.write("GCOV produced the following errors processing %s:\n   %s" \
              "(gcovr could not infer a working directory " \
              "that resolved it.)\n" % ( filename, "   ".join(errors) ))

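# Illustrative sketch of the "walk up the directory tree" fallback described
# above (a hypothetical helper, not called by this script): list the candidate
# gcov working directories in the order process_datafile() tries them.
def _candidate_working_dirs(gcda_path):
    candidates = []
    wd = os.path.split(os.path.abspath(gcda_path))[0]
    while True:
        candidates.append(wd)
        parent = os.path.split(wd)[0]
        if parent == wd:
            break
        wd = parent
    # e.g. "/build/obj/foo.gcda" yields ["/build/obj", "/build", "/"]
    return candidates
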
#
# Produce the classic gcovr text report
#
def print_text_report(covdata):
    def _num_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        return total - covered
    def _percent_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        if covered:
            return -1.0*covered/total
        else:
            return total or 1e6
    def _alpha(key):
        return key

    if options.output:
        OUTPUT = open(options.output,'w')
    else:
        OUTPUT = sys.stdout
    total_lines=0
    total_covered=0
    # Header
    OUTPUT.write("-"*78 + '\n')
    a = options.show_branch and "Branch" or "Lines"
    b = options.show_branch and "Taken" or "Exec"
    OUTPUT.write("File".ljust(40) + a.rjust(8) + b.rjust(8)+ "  Cover   Missing\n")
    OUTPUT.write("-"*78 + '\n')

    # Data
    keys = list(covdata.keys())
    keys.sort(key=options.sort_uncovered and _num_uncovered or \
              options.sort_percent and _percent_uncovered or _alpha)
    for key in keys:
        (t, n, txt) = covdata[key].summary()
        total_lines += t
        total_covered += n
        OUTPUT.write(txt + '\n')

    # Footer & summary
    OUTPUT.write("-"*78 + '\n')
    percent = total_lines and str(int(100.0*total_covered/total_lines)) or "--"
    OUTPUT.write("TOTAL".ljust(40) + str(total_lines).rjust(8) + \
          str(total_covered).rjust(8) + str(percent).rjust(6)+"%" + '\n')
    OUTPUT.write("-"*78 + '\n')

    # Close logfile
    if options.output:
        OUTPUT.close()

#
# Produce an XML report in the Cobertura format
#
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
                       str(float(lineCovered) / lineTotal) )
    root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
                       str(float(branchCovered) / branchTotal) )
    root.setAttribute( "timestamp", str(int(time.time())) )
    root.setAttribute( "version", "gcovr %s" % (version_str(),) )

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = list(covdata.keys())
    keys.sort()
    for f in keys:
        data = covdata[f]
        dir = options.filter.sub('',f)
        if f.endswith(dir):
            src_path = f[:-1*len(dir)]
            if len(src_path) > 0:
                while dir.startswith(os.path.sep):
                    src_path += os.path.sep
                    dir = dir[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            dir = f
        (dir, fname) = os.path.split(dir)

        package = packages.setdefault(
            dir, [ doc.createElement("package"), {},
                   0, 0, 0, 0 ] )
        c = doc.createElement("class")
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in branches.values():
                    if v > 0:
                        b_hits += 1
                coverage = 100*b_hits/len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute( "condition-coverage",
                                "%i%% (%i/%i)" %
                                (coverage, b_hits, len(branches)) )
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % ( coverage ) )
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)

            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(dir, fname))
        c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
        c.setAttribute( "branch-rate",
                        str(class_branch_hits / (1.0*class_branches or 1.0)) )
        c.setAttribute("complexity", "0.0")

        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    for packageName, packageData in packages.items():
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = list(packageData[1].keys())
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
        package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
        package.setAttribute("complexity", "0.0")


    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
            elif cwd.startswith(d):
                i = 1
                while normpath(d) != \
                          normpath(os.path.join(*tuple([cwd]+['..']*i))):
                    i += 1
                reldir = os.path.join(*tuple(['..']*i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

    xmlString = doc.toprettyxml()
    #xml.dom.ext.PrettyPrint(doc)
    if options.output is None:
        sys.stdout.write(xmlString+'\n')
    else:
        OUTPUT = open(options.output, 'w')
        OUTPUT.write(xmlString +'\n')
        OUTPUT.close()


##
## MAIN
##

#
# Create option parser
#
parser = OptionParser()
parser.add_option("--version",
        help="Print the version number, then exit",
        action="store_true",
        dest="version",
        default=False)
parser.add_option("-v","--verbose",
        help="Print progress messages",
        action="store_true",
        dest="verbose",
        default=False)
parser.add_option('--object-directory',
        help="Specify the directory that contains the gcov data files.  gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run.  Normally, gcovr can guess correctly.  This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
        action="store",
        dest="objdir",
        default=None)
parser.add_option("-o","--output",
        help="Print output to this filename",
        action="store",
        dest="output",
        default=None)
parser.add_option("-k","--keep",
        help="Keep temporary gcov files",
        action="store_true",
        dest="keep",
        default=False)
parser.add_option("-d","--delete",
        help="Delete the coverage files after they are processed",
        action="store_true",
        dest="delete",
        default=False)
parser.add_option("-f","--filter",
        help="Keep only the data files that match this regular expression",
        action="store",
        dest="filter",
        default=None)
parser.add_option("-e","--exclude",
        help="Exclude data files that match this regular expression",
        action="append",
        dest="exclude",
        default=[])
parser.add_option("--gcov-filter",
        help="Keep only gcov data files that match this regular expression",
        action="store",
        dest="gcov_filter",
        default=None)
parser.add_option("--gcov-exclude",
        help="Exclude gcov data files that match this regular expression",
        action="append",
        dest="gcov_exclude",
        default=[])
parser.add_option("-r","--root",
        help="Defines the root directory.  This is used to filter the files, and to standardize the output.",
        action="store",
        dest="root",
        default=None)
parser.add_option("-x","--xml",
        help="Generate XML instead of the normal tabular output.",
        action="store_true",
        dest="xml",
        default=None)
parser.add_option("-b","--branches",
        help="Tabulate the branch coverage instead of the line coverage.",
        action="store_true",
        dest="show_branch",
        default=None)
parser.add_option("-u","--sort-uncovered",
        help="Sort entries by increasing number of uncovered lines.",
        action="store_true",
        dest="sort_uncovered",
        default=None)
parser.add_option("-p","--sort-percentage",
        help="Sort entries by decreasing percentage of covered lines.",
        action="store_true",
        dest="sort_percent",
        default=None)
parser.usage="gcovr [options]"
parser.description="A utility to run gcov and generate a simple report that summarizes the coverage"
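#
# Typical invocations (for illustration only):
#   gcovr -r .                        # text summary for the project rooted at .
#   gcovr -r . --xml -o coverage.xml  # Cobertura-style XML report
#   gcovr -r . -b                     # tabulate branch coverage instead of lines
#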
#
# Process options
#
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    sys.stdout.write("gcovr %s\n" % (version_str(),))
    sys.stdout.write("\n")
    sys.stdout.write("Copyright (2008) Sandia Corporation. Under the terms of Contract \n")
    sys.stdout.write("DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government \n")
    sys.stdout.write("retains certain rights in this software.\n")
    sys.exit(0)
if options.objdir:
    if normpath(options.objdir) != options.objdir.replace('/',os.sep):
        sys.stdout.write("WARNING: relative referencing in --object-directory; this could\n")
        sys.stdout.write("         cause strange errors when gcovr attempts to identify\n")
        sys.stdout.write("         the original gcc working directory.\n")
#
# Setup filters
#
for i in range(0,len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])
if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    if not options.root:
        sys.stderr.write("ERROR: empty --root option.\n"
                         "   Root specifies the path to the root directory of your project: "
                         "cannot be an empty string.\n")
        sys.exit(1)
    options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
if options.filter is None:
    options.filter = re.compile('')
#
for i in range(0,len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')
#
# Get data files
#
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)
#
# Get coverage data
#
covdata = {}
for file in datafiles:
    process_datafile(file,covdata,options)
if options.verbose:
    sys.stdout.write("Gathered coverage data for "+str(len(covdata))+" files\n")
#
# Print report
#
if options.xml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)