llvm.org GIT mirror: lnt / commit e17846c
[LNT] Python 3 support: print statements

Summary:
This patch applies `2to3 -f print` fixes, corrects the indentation mangled by
`2to3` for multiline print statements, and adds
`from __future__ import print_function` to each file that was modified.

As requested on review, spaces are then added after commas separating
arguments to `print`, separating function call arguments or tuple elements
within arguments to `print`, etc.

The changes cover the files found to be affected when running the tests
(without result submission).

Reviewers: cmatthews, thopre, kristof.beyls

Reviewed By: cmatthews

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D67532

git-svn-id: https://llvm.org/svn/llvm-project/lnt/trunk@371891 91177308-0d34-0410-b5e6-96231b3b80d8

Hubert Tong, a month ago
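To make the mechanical rewrite concrete before reading the diff, here is a small before/after sketch of the patterns the print fixer applies. It is illustrative only, not code from this patch; the variable names are invented. With the `__future__` import in place, each line parses and behaves the same under Python 2 and Python 3:

    from __future__ import print_function  # added to each file the patch touches
    import sys

    n = 3
    # Bare 'print' becomes an empty call:
    print()                          # was: print
    # Arguments become call arguments, with spaces added after commas:
    print('total =', n)              # was: print 'total =',n
    # A trailing comma (newline suppression) becomes end=' ':
    print('working ...', end=' ')    # was: print 'working ...',
    # Stream redirection 'print >>stream, ...' becomes the file= keyword:
    print('done', file=sys.stderr)   # was: print >>sys.stderr, 'done'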
12 changed files with 216 additions and 204 deletions.
==== file 1 of 12 ====
@@ -104 +104 @@
 ##
 ## 11/08/98 ... fixed aput to output large arrays correctly
 
+from __future__ import print_function
 import stats # required 3rd party module
 import string, copy
 from types import *
@@ -513 +514 @@
         maxsize[col] = max(map(len,items)) + extra
     for row in lst:
         if row == ['\n'] or row == '\n' or row == '' or row == ['']:
-            print
+            print()
         elif row == ['dashes'] or row == 'dashes':
             dashes = [0]*len(maxsize)
             for j in range(len(maxsize)):
                 dashes[j] = '-'*(maxsize[j]-2)
-            print lineincustcols(dashes,maxsize)
-        else:
-            print lineincustcols(row,maxsize)
+            print(lineincustcols(dashes, maxsize))
+        else:
+            print(lineincustcols(row, maxsize))
     return None
 
 
@@ -533 +534 @@
 Returns: None
 """
     for row in listoflists:
-        print lineincols(row,colsize)
+        print(lineincols(row, colsize))
     return None
 
 
@@ -546 +547 @@
 """
     for row in listoflists:
         if row[-1] == '\n':
-            print row,
-        else:
-            print row
+            print(row, end=' ')
+        else:
+            print(row)
     return None
 
 
==== file 2 of 12 ====
@@ -47 +47 @@
 argument types require different functions to be called.  Having
 implementated the Dispatch class, however, means that to get info on
 a given function, you must use the REAL function name ... that is
-"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
-while "print stats.mean.__doc__" will print the doc for the Dispatch
+"print(stats.lmean.__doc__)" or "print(stats.amean.__doc__)" work fine,
+while "print(stats.mean.__doc__)" will print the doc for the Dispatch
 class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
 but should otherwise be consistent with the corresponding list functions.
 
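An aside on the Dispatch behavior the docstring above describes: a minimal toy dispatcher (invented here for illustration, not the module's actual code) shows why the wrapper object reports its class docstring rather than the wrapped function's:

    class Dispatch:
        """I am the Dispatch class docstring."""
        def __init__(self, func):
            self._func = func        # the real list-based implementation
        def __call__(self, *args):
            return self._func(*args)

    def lmean(inlist):
        """Returns the arithmetic mean of the values in inlist."""
        return sum(inlist) / float(len(inlist))

    mean = Dispatch(lmean)
    print(lmean.__doc__)   # the real function's doc, as recommended above
    print(mean.__doc__)    # falls back to the Dispatch class's doc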
@@ -221 +221 @@
 ## changed name of skewness and askewness to skew and askew
 ## fixed (a)histogram (which sometimes counted points <lowerreallimit)
 
+from __future__ import print_function
 import pstat # required 3rd party module
 import math, string, copy # required python modules
 from types import *
@@ -472 +473 @@
 Usage: lscoreatpercentile(inlist,percent)
 """
     if percent > 1:
-        print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
+        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
         percent = percent / 100.0
     targetcf = percent*len(inlist)
     h, lrl, binsize, extras = histogram(inlist)
@@ -534 +535 @@
         except:
             extrapoints = extrapoints + 1
     if (extrapoints > 0 and printextras == 1):
-        print '\nPoints outside given histogram range =',extrapoints
+        print('\nPoints outside given histogram range =', extrapoints)
     return (bins, lowerreallimit, binsize, extrapoints)
 
 
776777 """
777778 samples = ''
778779 while samples not in ['i','r','I','R','c','C']:
779 print '\nIndependent or related samples, or correlation (i,r,c): ',
780 print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
780781 samples = raw_input()
781782
782783 if samples in ['i','I','r','R']:
783 print '\nComparing variances ...',
784 print('\nComparing variances ...', end=' ')
784785 # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
785786 r = obrientransform(x,y)
786787 f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
788789 vartype='unequal, p='+str(round(p,4))
789790 else:
790791 vartype='equal'
791 print vartype
792 print(vartype)
792793 if samples in ['i','I']:
793794 if vartype[0]=='e':
794795 t,p = ttest_ind(x,y,0)
795 print '\nIndependent samples t-test: ', round(t,4),round(p,4)
796 print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
796797 else:
797798 if len(x)>20 or len(y)>20:
798799 z,p = ranksums(x,y)
799 print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
800 print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
800801 else:
801802 u,p = mannwhitneyu(x,y)
802 print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
803 print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
803804
804805 else: # RELATED SAMPLES
805806 if vartype[0]=='e':
806807 t,p = ttest_rel(x,y,0)
807 print '\nRelated samples t-test: ', round(t,4),round(p,4)
808 print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
808809 else:
809810 t,p = ranksums(x,y)
810 print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
811 print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
811812 else: # CORRELATION ANALYSIS
812813 corrtype = ''
813814 while corrtype not in ['c','C','r','R','d','D']:
814 print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
815 print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
815816 corrtype = raw_input()
816817 if corrtype in ['c','C']:
817818 m,b,r,p,see = linregress(x,y)
818 print '\nLinear regression for continuous variables ...'
819 print('\nLinear regression for continuous variables ...')
819820 lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
820821 pstat.printcc(lol)
821822 elif corrtype in ['r','R']:
822823 r,p = spearmanr(x,y)
823 print '\nCorrelation for ranked variables ...'
824 print "Spearman's r: ",round(r,4),round(p,4)
824 print('\nCorrelation for ranked variables ...')
825 print("Spearman's r: ", round(r, 4), round(p, 4))
825826 else: # DICHOTOMOUS
826827 r,p = pointbiserialr(x,y)
827 print '\nAssuming x contains a dichotomous variable ...'
828 print 'Point Biserial r: ',round(r,4),round(p,4)
829 print '\n\n'
828 print('\nAssuming x contains a dichotomous variable ...')
829 print('Point Biserial r: ', round(r, 4), round(p, 4))
830 print('\n\n')
830831 return None
831832
832833
@@ -1500 +1501 @@
         bz = 1.0
         if (abs(az-aold)<(EPS*abs(az))):
             return az
-    print 'a or b too big, or ITMAX too small in Betacf.'
+    print('a or b too big, or ITMAX too small in Betacf.')
 
 
 def lgammln(xx):
@@ -1820 +1821 @@
     lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                   [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
     if type(fname)<>StringType or len(fname)==0:
-        print
-        print statname
-        print
+        print()
+        print(statname)
+        print()
         pstat.printcc(lofl)
-        print
+        print()
         try:
             if stat.shape == ():
                 stat = stat[0]
             if prob.shape == ():
                 prob = prob[0]
         except:
             pass
-        print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
-        print
+        print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
+        print()
     else:
         file = open(fname,writemode)
         file.write('\n'+statname+'\n\n')
@@ -2416 +2417 @@
     denom = N.power(amoment(a,2,dimension),1.5)
     zero = N.equal(denom,0)
     if type(denom) == N.ndarray and asum(zero) <> 0:
-        print "Number of zeros in askew: ",asum(zero)
+        print("Number of zeros in askew: ", asum(zero))
     denom = denom + zero # prevent divide-by-zero
     return N.where(zero, 0, amoment(a,3,dimension)/denom)
 
@@ -2435 +2436 @@
     denom = N.power(amoment(a,2,dimension),2)
     zero = N.equal(denom,0)
     if type(denom) == N.ndarray and asum(zero) <> 0:
-        print "Number of zeros in akurtosis: ",asum(zero)
+        print("Number of zeros in akurtosis: ", asum(zero))
     denom = denom + zero # prevent divide-by-zero
     return N.where(zero,0,amoment(a,4,dimension)/denom)
 
@@ -2505 +2506 @@
         dimension = 0
     n = float(a.shape[dimension])
     if n<20:
-        print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
+        print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n)
     b2 = akurtosis(a,dimension)
     E = 3.0*(n-1) /(n+1)
     varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
@@ -2628 +2629 @@
         except: # point outside lower/upper limits
             extrapoints = extrapoints + 1
     if (extrapoints > 0 and printextras == 1):
-        print '\nPoints outside given histogram range =',extrapoints
+        print('\nPoints outside given histogram range =', extrapoints)
     return (bins, lowerreallimit, binsize, extrapoints)
 
 
30003001 """
30013002 samples = ''
30023003 while samples not in ['i','r','I','R','c','C']:
3003 print '\nIndependent or related samples, or correlation (i,r,c): ',
3004 print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
30043005 samples = raw_input()
30053006
30063007 if samples in ['i','I','r','R']:
3007 print '\nComparing variances ...',
3008 print('\nComparing variances ...', end=' ')
30083009 # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
30093010 r = obrientransform(x,y)
30103011 f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
30123013 vartype='unequal, p='+str(round(p,4))
30133014 else:
30143015 vartype='equal'
3015 print vartype
3016 print(vartype)
30163017 if samples in ['i','I']:
30173018 if vartype[0]=='e':
30183019 t,p = ttest_ind(x,y,None,0)
3019 print '\nIndependent samples t-test: ', round(t,4),round(p,4)
3020 print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
30203021 else:
30213022 if len(x)>20 or len(y)>20:
30223023 z,p = ranksums(x,y)
3023 print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
3024 print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
30243025 else:
30253026 u,p = mannwhitneyu(x,y)
3026 print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
3027 print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
30273028
30283029 else: # RELATED SAMPLES
30293030 if vartype[0]=='e':
30303031 t,p = ttest_rel(x,y,0)
3031 print '\nRelated samples t-test: ', round(t,4),round(p,4)
3032 print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
30323033 else:
30333034 t,p = ranksums(x,y)
3034 print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
3035 print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
30353036 else: # CORRELATION ANALYSIS
30363037 corrtype = ''
30373038 while corrtype not in ['c','C','r','R','d','D']:
3038 print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
3039 print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
30393040 corrtype = raw_input()
30403041 if corrtype in ['c','C']:
30413042 m,b,r,p,see = linregress(x,y)
3042 print '\nLinear regression for continuous variables ...'
3043 print('\nLinear regression for continuous variables ...')
30433044 lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
30443045 pstat.printcc(lol)
30453046 elif corrtype in ['r','R']:
30463047 r,p = spearmanr(x,y)
3047 print '\nCorrelation for ranked variables ...'
3048 print "Spearman's r: ",round(r,4),round(p,4)
3048 print('\nCorrelation for ranked variables ...')
3049 print("Spearman's r: ", round(r, 4), round(p, 4))
30493050 else: # DICHOTOMOUS
30503051 r,p = pointbiserialr(x,y)
3051 print '\nAssuming x contains a dichotomous variable ...'
3052 print 'Point Biserial r: ',round(r,4),round(p,4)
3053 print '\n\n'
3052 print('\nAssuming x contains a dichotomous variable ...')
3053 print('Point Biserial r: ', round(r, 4), round(p, 4))
3054 print('\n\n')
30543055 return None
30553056
30563057
@@ -3283 +3284 @@
         shp = N.ones(len(y.shape))
         shp[0] = len(x)
         x.shape = shp
-    print x.shape, y.shape
+    print(x.shape, y.shape)
     r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
     r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
     zerodivproblem = N.equal(r_den,0)
@@ -3403 +3404 @@
     pval = abs(pval)
     t = N.ones(pval.shape,N.float_)*50
     step = N.ones(pval.shape,N.float_)*25
-    print "Initial ap2t() prob calc"
+    print("Initial ap2t() prob calc")
     prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
-    print 'ap2t() iter: ',
+    print('ap2t() iter: ', end=' ')
     for i in range(10):
-        print i,' ',
+        print(i, ' ', end=' ')
         t = N.where(pval<prob,t+step,t-step)
         prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
         step = step/2
-    print
+    print()
     # since this is an ugly hack, we get ugly boundaries
     t = N.where(t>99.9,1000,t) # hit upper-boundary
     t = t+signs
@@ -3934 +3935 @@
             mask = N.clip(mask+newmask,0,1)
     noconverge = asum(N.equal(frozen,-1))
     if noconverge <> 0 and verbose:
-        print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
+        print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements')
     if arrayflag:
         return frozen
     else:
@@ -4019 +4020 @@
 Returns: statistic, p-value ???
 """
     if len(para) <> len(data):
-        print "data and para must be same length in aglm"
+        print("data and para must be same length in aglm")
         return
     n = len(para)
     p = pstat.aunique(para)
==== file 3 of 12 ====
@@ -0 +0 @@
+from __future__ import print_function
 import click
 import platform
 
@@ -163 +164 @@
     # Execute an upgrade on the database to initialize the schema.
     lnt.server.db.migrate.update_path(db_path)
 
-    print 'created LNT configuration in %r' % basepath
-    print '  configuration file: %s' % cfg_path
-    print '  WSGI app          : %s' % wsgi_path
-    print '  database file     : %s' % db_path
-    print '  temporary dir     : %s' % tmp_path
-    print '  host URL          : %s' % hosturl
-    print
-    print 'You can execute:'
-    print '  %s' % wsgi_path
-    print 'to test your installation with the builtin server.'
-    print
-    print 'For production use configure this application to run with any'
-    print 'WSGI capable web server. You may need to modify the permissions'
-    print 'on the database and temporary file directory to allow writing'
-    print 'by the web app.'
-    print
+    print('created LNT configuration in %r' % basepath)
+    print('  configuration file: %s' % cfg_path)
+    print('  WSGI app          : %s' % wsgi_path)
+    print('  database file     : %s' % db_path)
+    print('  temporary dir     : %s' % tmp_path)
+    print('  host URL          : %s' % hosturl)
+    print()
+    print('You can execute:')
+    print('  %s' % wsgi_path)
+    print('to test your installation with the builtin server.')
+    print()
+    print('For production use configure this application to run with any')
+    print('WSGI capable web server. You may need to modify the permissions')
+    print('on the database and temporary file directory to allow writing')
+    print('by the web app.')
+    print()
0 """Implement the command line 'lnt' tool."""
1 from __future__ import print_function
12 from .common import init_logger
23 from .common import submit_options
34 from .convert import action_convert
133134 result_url = results.get('result_url')
134135 if result_url is not None:
135136 if verbose:
136 print "Results available at:", result_url
137 print("Results available at:", result_url)
137138 else:
138 print result_url
139 print(result_url)
139140 elif verbose:
140 print "Results available at: no URL available"
141 print("Results available at: no URL available")
141142
142143
143144 class RunTestCLI(click.MultiCommand):
166167 import lnt.tests
167168 import inspect
168169
169 print 'Available tests:'
170 print('Available tests:')
170171 test_names = lnt.tests.get_names()
171172 max_name = max(map(len, test_names))
172173 for name in test_names:
173174 test_module = lnt.tests.get_module(name)
174175 description = inspect.cleandoc(test_module.__doc__)
175 print ' %-*s - %s' % (max_name, name, description)
176 print(' %-*s - %s' % (max_name, name, description))
176177
177178
178179 @click.command("submit")
408409 def command_get_version(input):
409410 """print the version of a profile"""
410411 import lnt.testing.profile.profile as profile
411 print profile.Profile.fromFile(input).getVersion()
412 print(profile.Profile.fromFile(input).getVersion())
412413
413414
414415 @action_profile.command("getTopLevelCounters")
417418 """print the whole-profile counter values"""
418419 import json
419420 import lnt.testing.profile.profile as profile
420 print json.dumps(profile.Profile.fromFile(input).getTopLevelCounters())
421 print(json.dumps(profile.Profile.fromFile(input).getTopLevelCounters()))
421422
422423
423424 @action_profile.command("getFunctions")
426427 """print the functions in a profile"""
427428 import json
428429 import lnt.testing.profile.profile as profile
429 print json.dumps(profile.Profile.fromFile(input).getFunctions())
430 print(json.dumps(profile.Profile.fromFile(input).getFunctions()))
430431
431432
432433 @action_profile.command("getCodeForFunction")
436437 """print the code/instruction for a function"""
437438 import json
438439 import lnt.testing.profile.profile as profile
439 print json.dumps(
440 list(profile.Profile.fromFile(input).getCodeForFunction(fn)))
440 print(json.dumps(
441 list(profile.Profile.fromFile(input).getCodeForFunction(fn))))
441442
442443
443444 def _version_check():
471472 if not value or ctx.resilient_parsing:
472473 return
473474 if lnt.__version__:
474 print "LNT %s" % (lnt.__version__,)
475 print("LNT %s" % (lnt.__version__, ))
475476 ctx.exit()
476477
477478
==== file 5 of 12 ====
@@ -0 +0 @@
+from __future__ import print_function
 import StringIO
 import logging
 import logging.handlers
@@ -261 +262 @@
             rotating.setLevel(logging.DEBUG)
             self.logger.addHandler(rotating)
         except (OSError, IOError) as e:
-            print >> sys.stderr, "Error making log file", \
-                LOG_FILENAME, str(e)
-            print >> sys.stderr, "Will not log to file."
+            print("Error making log file", \
+                  LOG_FILENAME, str(e), file=sys.stderr)
+            print("Will not log to file.", file=sys.stderr)
         else:
             self.logger.info("Started file logging.")
-            print "Logging to :", LOG_FILENAME
+            print("Logging to :", LOG_FILENAME)
     else:
         self.config['log_file_name'] = log_file_name
 
==== file 6 of 12 ====
@@ -1 +1 @@
 Base class for builtin-in tests.
 """
 
+from __future__ import print_function
 import sys
 import os
 
@@ -42 +43 @@
     def log(self, message, ts=None):
         if not ts:
             ts = timestamp()
-        print >>sys.stderr, '%s: %s' % (ts, message)
+        print('%s: %s' % (ts, message), file=sys.stderr)
 
     @staticmethod
     def print_report(report, output):
@@ -51 +52 @@
             output_stream = sys.stdout
         else:
             output_stream = open(output, 'w')
-        print >> output_stream, report.render()
+        print(report.render(), file=output_stream)
         if output_stream is not sys.stdout:
             output_stream.close()
 
@@ -86 +87 @@
         """Print the result URL"""
         result_url = server_results.get('result_url', None)
         if result_url is not None:
-            print "Results available at:", server_results['result_url']
+            print("Results available at:", server_results['result_url'])
0 """Single file compile-time performance testing"""
1 from __future__ import print_function
12 import errno
23 import hashlib
34 import json
758759 # Set up the sandbox.
759760 global g_output_dir
760761 if not os.path.exists(opts.sandbox_path):
761 print >>sys.stderr, "%s: creating sandbox: %r" % (
762 timestamp(), opts.sandbox_path)
762 print("%s: creating sandbox: %r" % (
763 timestamp(), opts.sandbox_path), file=sys.stderr)
763764 os.mkdir(opts.sandbox_path)
764765 if opts.timestamp_build:
765766 fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
887888
888889 # Show the tests, if requested.
889890 if opts.show_tests:
890 print >>sys.stderr, 'Available Tests'
891 print('Available Tests', file=sys.stderr)
891892 for name in sorted(set(name for name, _ in all_tests)):
892 print >>sys.stderr, ' %s' % (name,)
893 print
893 print(' %s' % (name, ), file=sys.stderr)
894 print()
894895 raise SystemExit
895896
896897 # Find the tests to run.
0 """LLVM test-suite compile and execution tests"""
1 from __future__ import print_function
12 import csv
23 import os
34 import platform
@@ -63 +64 @@
         cmdstr = ' '.join(args)
 
         if 'cwd' in kwargs:
-            print >>self._log, "# In working dir: " + kwargs['cwd']
-        print >>self.log, cmdstr
+            print("# In working dir: " + kwargs['cwd'], file=self._log)
+        print(cmdstr, file=self.log)
 
         self._log.flush()
         p = subprocess.Popen(args, stdout=self._log, stderr=self._log,
@@ -529 +530 @@
     # parallel build options to the test.
     test_modules.sort()
 
-    print >>sys.stderr, '%s: executing test modules' % (timestamp(),)
+    print('%s: executing test modules' % (timestamp(), ), file=sys.stderr)
     results = []
     for name in test_modules:
         # First, load the test module file.
@@ -654 +655 @@
     if config.use_isolation:
         # Write out the sandbox profile.
         sandbox_profile_path = os.path.join(basedir, "isolation.sb")
-        print >>sys.stderr, "%s: creating sandbox profile %r" % (
-            timestamp(), sandbox_profile_path)
+        print("%s: creating sandbox profile %r" % (
+            timestamp(), sandbox_profile_path), file=sys.stderr)
         with open(sandbox_profile_path, 'w') as f:
-            print >>f, """
+            print("""
 ;; Sandbox profile for isolation test access.
 (version 1)
 
@@ -677 +678 @@
        (regex #"^/private/tmp/")
        (regex #"^/private/var/folders/")
        (regex #"^/dev/")
-       (regex #"^%s"))""" % (basedir,)
+       (regex #"^%s"))""" % (basedir, ), file=f)
         common_args = ['sandbox-exec', '-f', sandbox_profile_path] +\
             common_args
 
     # Run a separate 'make build' step if --build-threads was given.
     if config.build_threads > 0:
         args = common_args + ['-j', str(config.build_threads), 'build']
-        print >>test_log, '%s: running: %s' % (timestamp(),
-                                               ' '.join('"%s"' % a
-                                                        for a in args))
+        print('%s: running: %s' % (timestamp(),
+                                   ' '.join('"%s"' % a
+                                            for a in args)), file=test_log)
         test_log.flush()
 
-        print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
-            timestamp(), config.build_threads)
+        print('%s: building "nightly tests" with -j%u...' % (
+            timestamp(), config.build_threads), file=sys.stderr)
         res = execute_command(test_log, basedir, args, report_dir)
         if res != 0:
-            print >> sys.stderr, "Failure while running make build! " \
-                "See log: %s" % test_log.name
+            print("Failure while running make build! " \
+                  "See log: %s" % test_log.name, file=sys.stderr)
 
     # Then 'make report'.
     args = common_args + ['-j', str(config.threads),
                           'report', 'report.%s.csv' % config.test_style]
-    print >>test_log, '%s: running: %s' % (timestamp(),
-                                           ' '.join('"%s"' % a
-                                                    for a in args))
+    print('%s: running: %s' % (timestamp(),
+                               ' '.join('"%s"' % a
+                                        for a in args)), file=test_log)
     test_log.flush()
 
     # FIXME: We shouldn't need to set env=os.environ here, but if we don't
     # somehow MACOSX_DEPLOYMENT_TARGET gets injected into the environment on OS
     # X (which changes the driver behavior and causes generally weirdness).
-    print >>sys.stderr, '%s: executing "nightly tests" with -j%u...' % (
-        timestamp(), config.threads)
+    print('%s: executing "nightly tests" with -j%u...' % (
+        timestamp(), config.threads), file=sys.stderr)
 
     res = execute_command(test_log, basedir, args, report_dir)
 
     if res != 0:
-        print >> sys.stderr, "Failure while running nightly tests! "\
-            "See log: %s" % test_log.name
+        print("Failure while running nightly tests! "\
+              "See log: %s" % test_log.name, file=sys.stderr)
 
 
 # Keep a mapping of mangled test names, to the original names in the
867868 # Set up the sandbox.
868869 sandbox_path = config.sandbox_path
869 print sandbox_path
870 print(sandbox_path)
870871 if not os.path.exists(sandbox_path):
871 print >>sys.stderr, "%s: creating sandbox: %r" % (
872 timestamp(), sandbox_path)
872 print("%s: creating sandbox: %r" % (
873 timestamp(), sandbox_path), file=sys.stderr)
873874 os.mkdir(sandbox_path)
874875
875876 # Create the per-test directory.
@@ -907 +908 @@
 def update_tools(make_variables, config, iteration):
     """Update the test suite tools. """
 
-    print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),)
+    print('%s: building test-suite tools' % (timestamp(), ), file=sys.stderr)
     args = ['make', 'tools']
     args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
     build_tools_log_path = os.path.join(config.build_dir(iteration),
                                         'build-tools.log')
     build_tools_log = open(build_tools_log_path, 'w')
-    print >>build_tools_log, '%s: running: %s' % (timestamp(),
-                                                  ' '.join('"%s"' % a
-                                                           for a in args))
+    print('%s: running: %s' % (timestamp(),
+                               ' '.join('"%s"' % a
+                                        for a in args)), file=build_tools_log)
    build_tools_log.flush()
    res = execute_command(build_tools_log, config.build_dir(iteration),
                          args, config.report_dir)
@@ -946 +947 @@
 
     args.extend(['--target=%s' % config.target])
 
-    print >>configure_log, '%s: running: %s' % (timestamp(),
-                                                ' '.join('"%s"' % a
-                                                         for a in args))
+    print('%s: running: %s' % (timestamp(),
+                               ' '.join('"%s"' % a
+                                        for a in args)), file=configure_log)
     configure_log.flush()
 
-    print >>sys.stderr, '%s: configuring...' % timestamp()
+    print('%s: configuring...' % timestamp(), file=sys.stderr)
     res = execute_command(configure_log, basedir, args, config.report_dir)
     configure_log.close()
     if res != 0:
@@ -967 +968 @@
     obj_path = os.path.join(basedir, suffix)
     src_path = os.path.join(config.test_suite_root, suffix)
     if not os.path.exists(obj_path):
-        print '%s: initializing test dir %s' % (timestamp(), suffix)
+        print('%s: initializing test dir %s' % (timestamp(), suffix))
         os.mkdir(obj_path)
         shutil.copyfile(os.path.join(src_path, 'Makefile'),
                         os.path.join(obj_path, 'Makefile'))
 
 
@@ -976 +977 @@
 def run_test(nick_prefix, iteration, config):
-    print >>sys.stderr, "%s: checking source versions" % (
-        timestamp(),)
+    print("%s: checking source versions" % (
+        timestamp(), ), file=sys.stderr)
 
     test_suite_source_version = get_source_version(config.test_suite_root)
 
@@ -988 +989 @@
                                config)
 
     # Scan for LNT-based test modules.
-    print >>sys.stderr, "%s: scanning for LNT-based test modules" % (
-        timestamp(),)
+    print("%s: scanning for LNT-based test modules" % (
+        timestamp(), ), file=sys.stderr)
     test_modules = list(scan_for_test_modules(config))
-    print >>sys.stderr, "%s: found %d LNT-based test modules" % (
-        timestamp(), len(test_modules))
+    print("%s: found %d LNT-based test modules" % (
+        timestamp(), len(test_modules)), file=sys.stderr)
 
     nick = nick_prefix
     if config.auto_name:
@@ -1000 +1001 @@
         cc_info = config.cc_info
         cc_nick = '%s_%s' % (cc_info.get('cc_name'), cc_info.get('cc_build'))
         nick += "__%s__%s" % (cc_nick, cc_info.get('cc_target').split('-')[0])
-    print >>sys.stderr, "%s: using nickname: %r" % (timestamp(), nick)
+    print("%s: using nickname: %r" % (timestamp(), nick), file=sys.stderr)
 
     basedir = prepare_build_dir(config, iteration)
 
@@ -1008 +1009 @@
     # cause make horrible fits).
 
     start_time = timestamp()
-    print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir)
+    print('%s: starting test in %r' % (start_time, basedir), file=sys.stderr)
 
     # Configure the test suite.
     if config.run_configure or not os.path.exists(os.path.join(
@@ -1053 +1054 @@
     else:
         test_namespace = 'nightlytest'
     if run_nightly_test:
-        print >>sys.stderr, '%s: loading nightly test data...' % timestamp()
+        print('%s: loading nightly test data...' % timestamp(), file=sys.stderr)
         # If nightly test went screwy, it won't have produced a report.
-        print build_report_path
+        print(build_report_path)
         if not os.path.exists(build_report_path):
             fatal('nightly test failed, no report generated')
 
@@ -1075 +1076 @@
             existing_tests.add(s.name)
         test_samples.extend(results)
 
-    print >>sys.stderr, '%s: capturing machine information' % (timestamp(),)
+    print('%s: capturing machine information' % (timestamp(), ), file=sys.stderr)
     # Collect the machine and run info.
     #
     # FIXME: Import full range of data that the Clang tests are using?
@@ -1153 +1154 @@
             if name in target:
                 logger.warning("parameter %r overwrote existing value: %r" %
                                (name, target.get(name)))
-            print target, name, value
+            print(target, name, value)
             target[name] = value
 
     # Generate the test report.
     lnt_report_path = config.report_path(iteration)
-    print >>sys.stderr, '%s: generating report: %r' % (timestamp(),
-                                                       lnt_report_path)
+    print('%s: generating report: %r' % (timestamp(),
+                                         lnt_report_path), file=sys.stderr)
     machine = lnt.testing.Machine(nick, machine_info)
     run = lnt.testing.Run(start_time, end_time, info=run_info)
 
     report = lnt.testing.Report(machine, run, test_samples)
     lnt_report_file = open(lnt_report_path, 'w')
-    print >>lnt_report_file, report.render()
+    print(report.render(), file=lnt_report_file)
     lnt_report_file.close()
 
     return report
@@ -1235 +1236 @@
 
     assert len(to_go) >= 1, "Missing at least one accounting file."
     for path in to_go:
-        print "Removing:", path
+        print("Removing:", path)
         os.remove(path)
 
 
@@ -1705 +1706 @@
         reports = []
 
         for i in range(opts.multisample):
-            print >>sys.stderr, "%s: (multisample) running iteration %d" %\
-                (timestamp(), i)
+            print("%s: (multisample) running iteration %d" %\
+                  (timestamp(), i), file=sys.stderr)
             report = run_test(opts.label, i, config)
             reports.append(report)
 
         # Create the merged report.
         #
         # FIXME: Do a more robust job of merging the reports?
-        print >>sys.stderr, "%s: (multisample) creating merged report" % (
-            timestamp(),)
+        print("%s: (multisample) creating merged report" % (
+            timestamp(), ), file=sys.stderr)
         machine = reports[0].machine
         run = reports[0].run
         run.end_time = reports[-1].run.end_time
@@ -1725 +1726 @@
         lnt_report_path = config.report_path(None)
         report = lnt.testing.Report(machine, run, test_samples)
         lnt_report_file = open(lnt_report_path, 'w')
-        print >>lnt_report_file, report.render()
+        print(report.render(), file=lnt_report_file)
         lnt_report_file.close()
 
     else:
@@ -1741 +1742 @@
         lnt_report_path = config.report_path(None)
 
         lnt_report_file = open(lnt_report_path, 'w')
-        print >>lnt_report_file, test_results.render()
+        print(test_results.render(), file=lnt_report_file)
         lnt_report_file.close()
         merge_run = 'replace'
 
0 """LLVM test-suite"""
1 from __future__ import print_function
12 import subprocess
23 import tempfile
34 import json
970971 "iprofiler -timeprofiler -I 40u")
971972
972973 cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler]
973 print ' '.join(cmd_iprofiler)
974 print(' '.join(cmd_iprofiler))
974975
975976 out = subprocess.check_output(cmd_iprofiler)
976977
==== file 10 of 12 ====
@@ -0 +0 @@
+from __future__ import print_function
 from lnt.util import NTEmailReport
 from contextlib import closing
 from lnt.util import logger
@@ -199 +200 @@
 
     # Print the generic import information.
     if 'import_file' in result:
-        print >>out, "Importing %r" % os.path.basename(result['import_file'])
+        print("Importing %r" % os.path.basename(result['import_file']), file=out)
     if result['success']:
-        print >>out, "Import succeeded."
-        print >>out
+        print("Import succeeded.", file=out)
+        print(file=out)
     else:
         out.flush()
-        print >>err, "Import Failed:"
-        print >>err, "%s\n" % result['error']
+        print("Import Failed:", file=err)
+        print("%s\n" % result['error'], file=err)
         message = result.get('message', None)
         if message:
-            print >>err, "%s\n" % message
-        print >>err, "--------------"
+            print("%s\n" % message, file=err)
+        print("--------------", file=err)
         err.flush()
         return
 
@@ -222 +223 @@
     # List the parameter sets, if interesting.
     show_pset = len(test_results) > 1
     if show_pset:
-        print >>out, "Parameter Sets"
-        print >>out, "--------------"
+        print("Parameter Sets", file=out)
+        print("--------------", file=out)
         for i, info in enumerate(test_results):
-            print >>out, "P%d: %s" % (i, info['pset'])
-        print >>out
+            print("P%d: %s" % (i, info['pset']), file=out)
+        print(file=out)
 
     total_num_tests = sum([len(item['results'])
                            for item in test_results])
-    print >>out, "--- Tested: %d tests --" % total_num_tests
+    print("--- Tested: %d tests --" % total_num_tests, file=out)
     test_index = 0
     result_kinds = collections.Counter()
     for i, item in enumerate(test_results):
@@ -269 +270 @@
 
             if show_pset:
                 name = 'P%d :: %s' % (i, name)
-            print >>out, "%s: %s (%d of %d)" % (result_string, name,
-                                                test_index, total_num_tests)
+            print("%s: %s (%d of %d)" % (result_string, name,
+                                         test_index, total_num_tests), file=out)
 
             if result_info:
-                print >>out, "%s TEST '%s' %s" % ('*'*20, name, '*'*20)
-                print >>out, result_info
-                print >>out, "*" * 20
+                print("%s TEST '%s' %s" % ('*'*20, name, '*'*20), file=out)
+                print(result_info, file=out)
+                print("*" * 20, file=out)
 
     if 'original_run' in result:
-        print >>out, ("This submission is a duplicate of run %d, "
-                      "already in the database.") % result['original_run']
-        print >>out
+        print(("This submission is a duplicate of run %d, "
+               "already in the database.") % result['original_run'], file=out)
+        print(file=out)
 
     if result['report_to_address']:
-        print >>out, "Report emailed to: %r" % result['report_to_address']
-        print >>out
+        print("Report emailed to: %r" % result['report_to_address'], file=out)
+        print(file=out)
 
     # Print the processing times.
-    print >>out, "Processing Times"
-    print >>out, "----------------"
-    print >>out, "Load   : %.2fs" % result['load_time']
-    print >>out, "Import : %.2fs" % result['import_time']
-    print >>out, "Report : %.2fs" % result['report_time']
-    print >>out, "Total  : %.2fs" % result['total_time']
-    print >>out
+    print("Processing Times", file=out)
+    print("----------------", file=out)
+    print("Load   : %.2fs" % result['load_time'], file=out)
+    print("Import : %.2fs" % result['import_time'], file=out)
+    print("Report : %.2fs" % result['report_time'], file=out)
+    print("Total  : %.2fs" % result['total_time'], file=out)
+    print(file=out)
 
     # Print the added database items.
     total_added = (result['added_machines'] + result['added_runs'] +
                    result['added_tests'] + result.get('added_samples', 0))
     if total_added:
-        print >>out, "Imported Data"
-        print >>out, "-------------"
+        print("Imported Data", file=out)
+        print("-------------", file=out)
         if result['added_machines']:
-            print >>out, "Added Machines: %d" % result['added_machines']
+            print("Added Machines: %d" % result['added_machines'], file=out)
         if result['added_runs']:
-            print >>out, "Added Runs    : %d" % result['added_runs']
+            print("Added Runs    : %d" % result['added_runs'], file=out)
         if result['added_tests']:
-            print >>out, "Added Tests   : %d" % result['added_tests']
+            print("Added Tests   : %d" % result['added_tests'], file=out)
         if result.get('added_samples', 0):
-            print >>out, "Added Samples : %d" % result['added_samples']
-        print >>out
-        print >>out, "Results"
-        print >>out, "----------------"
+            print("Added Samples : %d" % result['added_samples'], file=out)
+        print(file=out)
+        print("Results", file=out)
+        print("----------------", file=out)
     for kind, count in result_kinds.items():
-        print >>out, kind, ":", count
+        print(kind, ":", count, file=out)
 
 
 def import_from_string(config, db_name, db, session, ts_name, data,
0 """
11 Utility for submitting files to a web server over HTTP.
22 """
3 from __future__ import print_function
34 import sys
45 import urllib
56 import urllib2
1819 try:
1920 error = json.loads(reply)
2021 except ValueError:
21 print "error: {}".format(reply)
22 print("error: {}".format(reply))
2223 return
2324 sys.stderr.write("error: lnt server: {}\n".format(error.get('error')))
2425 message = error.get('message', '')
5455 return json.loads(result_data)
5556 except Exception:
5657 import traceback
57 print "Unable to load result, not a valid JSON object."
58 print
59 print "Traceback:"
58 print("Unable to load result, not a valid JSON object.")
59 print()
60 print("Traceback:")
6061 traceback.print_exc()
61 print
62 print "Result:"
63 print "error:", result_data
62 print()
63 print("Result:")
64 print("error:", result_data)
6465 return
6566
6667
==== file 12 of 12 ====
@@ -0 +0 @@
 # This code lifted from the mod_wsgi docs.
+from __future__ import print_function
 import os
 import sys
 import signal
@@ -17 +18 @@
 def _restart(path):
     _queue.put(True)
     prefix = 'monitor (pid=%d):' % os.getpid()
-    print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
-    print >> sys.stderr, '%s Triggering process restart.' % prefix
+    print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
+    print('%s Triggering process restart.' % prefix, file=sys.stderr)
     os.kill(os.getpid(), signal.SIGINT)
 
 
@@ -115 +116 @@
     _lock.acquire()
     if not _running:
         prefix = 'monitor (pid=%d):' % os.getpid()
-        print >> sys.stderr, '%s Starting change monitor.' % prefix
+        print('%s Starting change monitor.' % prefix, file=sys.stderr)
         _running = True
         _thread.start()