perm filename TIMING.MSG[TIM,LSP]63 blob sn#792901 filedate 1985-05-17 generic text, type C, neo UTF8
COMMENT ⊗   VALID 00392 PAGES
C REC  PAGE   DESCRIPTION
C00001 00001
C00047 00002	∂27-Feb-81  1334	Deutsch at PARC-MAXC 	Re: Timings  
C00049 00003	∂27-Feb-81  1342	Dick Gabriel <RPG at SU-AI> 	Timings    
C00051 00004	∂27-Feb-81  1354	RPG  	Timings  
C00053 00005	∂27-Feb-81  1412	Bruce E. Edwards <BEE at MIT-AI> 	Re: timings
C00055 00006	∂27-Feb-81  1427	Deutsch at PARC-MAXC 	Re: Timings  
C00056 00007	∂27-Feb-81  1502	Deutsch at PARC-MAXC 	Re: Timings  
C00058 00008	∂27-Feb-81  1533	Dick Gabriel <RPG at SU-AI> 	Timings    
C00060 00009	∂27-Feb-81  1616	Earl A. Killian <EAK at MIT-MC> 	Timings     
C00061 00010	∂27-Feb-81  1615	George J. Carrette <GJC at MIT-MC> 	timings  
C00062 00011	∂27-Feb-81  1655	David.Neves at CMU-10A 	Re: Timings
C00063 00012	∂27-Feb-81  1658	David.Neves at CMU-10A 	Re: Timings
C00064 00013	∂27-Feb-81  1710	CSVAX.fateman at Berkeley 	Timings 
C00065 00014	∂27-Feb-81  1719	CSVAX.fateman at Berkeley 	Timings 
C00066 00015	∂27-Feb-81  1730	CSVAX.fateman at Berkeley 	timings 
C00068 00016	∂27-Feb-81  1947	George J. Carrette <GJC at MIT-MC> 	Timings  
C00070 00017	∂27-Feb-81  2002	Howard I. Cannon <HIC at MIT-MC> 	Timings    
C00071 00018	∂27-Feb-81  2008	GYRO at MIT-ML (Scott W. Layson) 	Lisp timings    
C00073 00019	∂27-Feb-81  2048	PDL at MIT-DMS (P. David Lebling) 	[Re: Timings  ]
C00074 00020	∂27-Feb-81  2057	JONL at MIT-MC (Jon L White) 	Timings for LISP benchmarks, and reminder of a proposal by Deutsch    
C00081 00021	∂27-Feb-81  2117	Howard I. Cannon <HIC at MIT-MC> 	Timings for LISP benchmarks    
C00082 00022	∂27-Feb-81  2131	CWH at MIT-MC (Carl W. Hoffman) 	Timings     
C00083 00023	∂27-Feb-81  2201	CSVAX.fateman at Berkeley 	here's a test for you to look at/ distribute    
C00092 00024	∂27-Feb-81  2201	CSVAX.fateman at Berkeley 	Timings for LISP benchmarks, and reminder of a proposal by Deutsch  
C00093 00025	∂28-Feb-81  0916	NEDHUE at MIT-AI (Edmund M. Goodhue) 	Timings     
C00094 00026	∂28-Feb-81  1046	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
C00095 00027	∂28-Feb-81  1109	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
C00096 00028	∂28-Feb-81  1424	Deutsch at PARC-MAXC 	Re: Timings for LISP benchmarks, and reminder of a proposal by 
C00097 00029	∂28-Feb-81  1718	YONKE at BBND 	JONL's message concerning benchmarks    
C00098 00030	∂28-Feb-81  1818	CSVAX.fateman at Berkeley 	why I excluded GC times
C00100 00031	∂28-Feb-81  2014	Guy.Steele at CMU-10A 	Re: Timings 
C00102 00032	∂28-Feb-81  2016	Scott.Fahlman at CMU-10A 	benchmarks    
C00103 00033	∂01-Mar-81  0826	PLATTS at WHARTON-10 ( Steve Platt) 	timing for lisp   
C00104 00034	∂01-Mar-81  1300	RJF at MIT-MC (Richard J. Fateman) 	more lisp mavens   
C00105 00035	∂02-Mar-81  0443	Robert H. Berman <RHB at MIT-MC> 	Timings    
C00106 00036	∂02-Mar-81  0543	Robert H. Berman <RHB at MIT-MC> 	Timings    
C00107 00037	∂02-Mar-81  0741	James E. O'Dell <JIM at MIT-MC> 	Timings
C00110 00038	∂02-Mar-81  1006	Deutsch at PARC-MAXC 	Re: Timings  
C00111 00039	∂02-Mar-81  1312	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
C00112 00040	∂02-Mar-81  1634	RPG  	Lisp Timings  
C00117 00041	∂03-Mar-81  1524	RPG  	Lisp Timing Mailing List
C00119 00042	Here's the first message, which you missed:
C00124 00043	∂04-Mar-81  0449	Robert H. Berman <RHB at MIT-MC> 	Lisp Timing Mailing List  
C00129 00044	∂04-Mar-81  0957	Scott.Fahlman at CMU-10A 	Re: Translators    
C00132 00045	∂04-Mar-81  0959	CSVAX.char at Berkeley 	lisp benchmarking    
C00135 00046	∂04-Mar-81  1627	HEDRICK at RUTGERS 	sometime of possible interest 
C00140 00047	∂06-Mar-81  1301	HES at MIT-AI (Howard Shrobe) 	Methodology considerations:  
C00142 00048	Subject: Lisp Timings Group
C00149 00049	∂10-Mar-81  0727	correira at UTEXAS-11  	lisp timings    
C00151 00050	∂03-Mar-81  2109	Barrow at SRI-KL (Harry Barrow ) 	Lisp Timings    
C00154 00051	∂02-Mar-81  0004	Charles Frankston <CBF at MIT-MC> 	timings   
C00158 00052	∂17-Mar-81  1155	Masinter at PARC-MAXC 	Re: GC 
C00162 00053	∂16-Mar-81  1429	HEDRICK at RUTGERS 	Re: Solicitation    
C00167 00054	∂16-Mar-81  1433	HEDRICK at RUTGERS 	Re: GC    
C00174 00055	∂16-Mar-81  1810	Scott.Fahlman at CMU-10A 	Re: GC   
C00176 00056	∂16-Mar-81  1934	PLATTS at WHARTON-10 ( Steve Platt) 	lisp -- my GC and machine specs  
C00180 00057	∂17-Mar-81  0745	Griss at UTAH-20 (Martin.Griss) 	Re: GC      
C00181 00058	∂17-Mar-81  0837	Robert S. Boyer <BOYER at SRI-CSL> 	Solicitation  
C00185 00059	∂17-Mar-81  0847	Robert S. Boyer <BOYER at SRI-CSL> 	LISP Timings  
C00187 00060	∂17-Mar-81  1155	Masinter at PARC-MAXC 	Re: GC 
C00191 00061	∂17-Mar-81  1218	RPG  	Bureaucracy   
C00192 00062	∂17-Mar-81  1921	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: Solicitation    
C00202 00063	∂31-Mar-81  1451	RPG  	Timing Benchmarks  
C00204 00064	∂01-Apr-81  1550	Masinter at PARC-MAXC    
C00209 00065	∂05-Apr-81  2141	JHL   via LONDON    
C00210 00066	∂05-Apr-81  2217	Carl Hewitt <CARL at MIT-AI> 	Lisp Timing Mailing List 
C00211 00067	∂06-Apr-81  1302	RPG  	Timing benchmark   
C00220 00068	∂06-Apr-81  2007	RPG  
C00222 00069	∂05-Apr-81  0208	H at MIT-AI (Jack Holloway) 	lisp timings    
C00223 00070	∂06-Apr-81  1410	HEDRICK at RUTGERS 	Re: Timing benchmark     
C00226 00071	∂06-Apr-81  1931	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: Timing benchmark
C00229 00072	∂06-Apr-81  2008	HEDRICK at RUTGERS 	Re: Timing benchmark     
C00235 00073	∂06-Apr-81  2007	RPG  
C00237 00074	∂07-Apr-81  0924	RPG  	Rules    
C00241 00075	∂07-Apr-81  1323	JONL at MIT-MC (Jon L White) 	Proposed ''mini'' benchmark, with interpretation. 
C00254 00076	∂10-Apr-81  1051	HEDRICK at RUTGERS 	Re: Rules      
C00262 00077	∂10-Apr-81  1205	George J. Carrette <GJC at MIT-MC> 	Rules    
C00264 00078	∂11-Apr-81  1001	CSVAX.jkf at Berkeley 	result of pairs benchmark on franz.  
C00269 00079	∂13-Apr-81  1320	RPG  
C00271 00080	∂13-Apr-81  1239	RPG  	Groundrules (reprise)   
C00276 00081	∂13-Apr-81  1338	CLR at MIT-XX 	Re: Groundrules (reprise)     
C00279 00082	∂13-Apr-81  1724	YONKE at BBND 	Re: Groundrules (reprise)     
C00281 00083	∂13-Apr-81  1934	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: Groundrules (reprise)       
C00286 00084	∂13-Apr-81  2214	HEDRICK at RUTGERS 	Re: Groundrules (reprise)     
C00292 00085	∂21-Apr-81  1316	RPG  	SCCPP    
C00293 00086	∂13-Mar-81  1959	MEEHAN at MIT-AI (James R. Meehan) 
C00295 00087	∂31-Mar-81  1615	Deutsch at PARC-MAXC 	Re: Timing Benchmarks  
C00296 00088	∂21-Apr-81  1604	Greenberg.Symbolics at MIT-Multics 
C00298 00089	∂07-Apr-81  1037	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: Rules        
C00300 00090	∂07-Apr-81  1107	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Rules - GC time  
C00304 00091	∂07-Apr-81  2213	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	SCCPP on UCI-Lisp
C00307 00092	∂21-Apr-81  2018	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Lost mail? 
C00319 00093	∂06-Apr-81  1204	RPG  
C00325 00094	∂14-Apr-81  2031	RPG  
C00351 00095	∂22-Apr-81  1801	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Multics Timing results vindicated  
C00353 00096	∂23-Apr-81  1232	RPG  	FRANZ Benchmark    (FRPOLY)
C00367 00097	∂23-Apr-81  1245	RPG  	Franz benchmark    
C00368 00098	∂24-Apr-81  1324	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: FRANZ Benchmark, Multics Numbers    
C00371 00099	∂24-Apr-81  1414	RPG  	Errata   
C00372 00100	∂24-Apr-81  1608	CSVAX.jkf at Berkeley 	octal vrs decimal
C00373 00101	∂25-Apr-81  1242	Greenberg.Symbolics at MIT-Multics 	Re: octal vrs decimal   
C00374 00102	∂25-Apr-81  1320	Vanmelle at SUMEX-AIM 	Re:  Re: octal vrs decimal 
C00375 00103	∂25-Apr-81  1727	Greenberg.Symbolics at MIT-Multics 	Re:  Re: octal vrs decimal   
C00376 00104	∂25-Apr-81  2210	CSVAX.jkf at Berkeley 	Re:  Re: octal vrs decimal 
C00378 00105	∂24-Apr-81  1411	CSVAX.jkf at Berkeley 	franz timing results  
C00380 00106	∂28-Apr-81  1122	Vanmelle at SUMEX-AIM 	Re: Benchmarks        
C00382 00107	∂28-Apr-81  2115	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: FRANZ Benchmark        
C00386 00108	∂02-May-81  1245	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: FRANZ Benchmark        
C00392 00109	∂04-May-81  1326	correira at UTEXAS-11  	UTLISP benchmarks    
C00394 00110	∂05-May-81  0643	correira at UTEXAS-11  	SCCPP Timings for UTLISP  
C00396 00111	∂05-May-81  0643	correira at UTEXAS-11  	FRPOLY Timings for UTLISP 
C00402 00112	∂05-May-81  0643	correira at UTEXAS-11  	A thumbnail sketch of UTLISP   
C00411 00113	∂26-May-81  0916	George J. Carrette <GJC at MIT-MC> 	benchmark.    
C00428 00114	∂09-Aug-81  1912	RPG   via CMU-20C 	Vacation   
C00429 00115	∂20-Oct-81  1527	LYNCH at USC-ISIB 	Benchmarks for Interlisp-VAX   
C00433 00116	∂20-Oct-81  1614	Doug Lenat <CSD.LENAT at SU-SCORE> 	Save the Dolphins  
C00442 00117	∂20-Oct-81  1744	pratt@Diablo (SuNet) 	Benchmarks for Interlisp-VAX
C00453 00118	∂21-Oct-81  0109	RPG  	Criterion 1   
C00457 00119	∂17-Oct-81  2340	pratt@Diablo (SuNet) 	Fairness
C00467 00120	∂18-Oct-81  2141	pratt@Diablo (SuNet) 	For what it's worth    
C00472 00121	∂18-Oct-81  2254	RPG@Sail (SuNet) 	Several points:       
C00476 00122	∂19-Oct-81  0935	RINDFLEISCH@SUMEX-AIM (SuNet) 	FYI - Other Lisp Timing Thrashes  
C00484 00123	∂19-Oct-81  1045	pratt@Diablo (SuNet) 	Several points:   
C00488 00124	∂19-Oct-81  1143	RPG@Sail (SuNet) 	Long, silly response to Vaughn Pratt      
C00491 00125	∂19-Oct-81  1545	Jeff Rubin <JBR at S1-A> 
C00493 00126	∂21-Oct-81  1325	RPG  	Wall time
C00500 00127	∂22-Oct-81  2009	George J. Carrette <GJC at MIT-MC> 	timing tests and benchmarks. 
C00502 00128	∂10-Dec-81  1050	Jerry Roylance <GLR at MIT-AI> 	LISPM Array Timings    
C00506 00129	∂11-Dec-81  1215	David A. Moon <MOON at MIT-MC> 	LISPM Array Timings    
C00508 00130	∂16-Dec-81  0937	Guy.Steele at CMU-10A 	TAK for S-1 
C00531 00131	∂18-Dec-81  2112	Earl A. Killian            <Killian at MIT-Multics> 	tak    
C00537 00132	∂07-Jan-82  1311	RPG  
C00538 00133	∂13-Jan-82  1015	Kalman Reti <XCON.RETI at DEC-MARLBORO> 	Re: Benchmarks     
C00540 00134	∂29-Jan-82  2149	Kim.fateman at Berkeley 	Okay, you hackers   
C00543 00135	∂19-Feb-82  1603	Richard J. Fateman <RJF at MIT-MC> 
C00545 00136	∂19-Feb-82  1629	George J. Carrette <GJC at MIT-MC> 
C00548 00137	∂26-Feb-82  2006	Howard I. Cannon <HIC at SCRC-TENEX at MIT-AI> 	(TAK 18. 12. 6.) 
C00550 00138	∂27-Feb-82  1152	Howard I. Cannon <HIC at MIT-MC> 	(TAK 18. 12. 6.)     
C00551 00139	∂26-Feb-82  1756	Masinter at PARC-MAXC 	some interesting old numbers    
C00554 00140	∂03-Mar-82  1043	George J. Carrette <GJC at MIT-MC> 
C00561 00141	∂23-Apr-82  2308	RPG  	On the air again   
C00563 00142	∂25-Apr-82  1340	RPG  	FRANZ Benchmark (called FRPOLY)   
C00578 00143	∂25-Apr-82  1349	RPG  	Lisps I want to see
C00579 00144	∂25-Apr-82  1400	RPG  	Takeuchi 
C00586 00145	∂26-Apr-82  1421	RPG  	Puzzle Benchmark   
C00596 00146	∂26-Feb-82  0942	Griss at UTAH-20 (Martin.Griss) 	PIG2.MSG    
C00610 00147	∂28-Feb-82  0940	John O'Donnell <Odonnell at YALE> 	LISP benchmark package   
C00611 00148	∂10-Mar-82  2148	Griss at UTAH-20 (Martin.Griss) 	MACLISP times    
C00613 00149	∂16-Mar-82  0614	Griss at UTAH-20 (Martin.Griss) 	Some new tests   
C00625 00150	∂07-Apr-82  1051	Mike Genesereth <CSD.GENESERETH at SU-SCORE> 	machine timings    
C00629 00151	∂24-Apr-82  0010	Howard I. Cannon <HIC at MIT-MC> 	On the air again     
C00630 00152	∂24-Apr-82  0611	Martin.Griss <Griss at UTAH-20> 	Re: On the air again       
C00631 00153	∂24-Apr-82  0756	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: On the air again      
C00633 00154	∂24-Apr-82  0832	MASINTER at PARC-MAXC 	small benchmarks 
C00637 00155	∂24-Apr-82  1102	Glenn S. Burke <GSB at MIT-ML> 	Major Dialects, fyi    
C00638 00156	∂24-Apr-82  1206	Greenberg.Symbolics at MIT-MULTICS 	Re: On the air again    
C00639 00157	∂25-Apr-82  1423	Martin.Griss <Griss at UTAH-20> 	Re: Lisps I want to see    
C00640 00158	∂25-Apr-82  1719	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: Lisps I want to see   
C00642 00159	∂27-Apr-82  1102	Kim.jkf at Berkeley 	Re: franz tak benchmarks     
C00645 00160	∂02-Apr-82  0950	Walter van Roggen <VANROGGEN at CMU-20C> 	lisp benchmarks   
C00646 00161	∂26-Apr-82  1222	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	Re: Lisps I want to see
C00648 00162	∂28-Apr-82  1316	RPG  	Gross Lossage 
C00650 00163	∂28-Apr-82  1456	JonL at PARC-MAXC 	Re: Gross Lossage    
C00651 00164	∂29-Apr-82  2158	MASINTER at PARC-MAXC 	MAS benchmark    
C00653 00165	∂29-Apr-82  2244	MASINTER at PARC-MAXC 	non-local TAK    
C00656 00166	∂01-May-82  1044	JonL at PARC-MAXC 	Re: Gross LossageD   
C00659 00167	∂28-Apr-82  1248	Mabry Tyson <Tyson at SRI-AI> 	TAK function!#"%&$$"&#( 
C00663 00168	∂28-Apr-82  1325	Mabry Tyson <Tyson at SRI-AI> 	Re: Gross Lossage  
C00664 00169	∂28-Apr-82  1954	Martin.Griss <Griss at UTAH-20> 	Re: Gross Lossage     
C00665 00170	∂28-Apr-82  2209	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: Gross Lossage    
C00666 00171	∂29-Apr-82  1232	Mabry Tyson <Tyson at SRI-AI> 	Re: Gross Lossage  
C00668 00172	∂01-May-82  2326	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	a couple of benchmark results    
C00672 00173	∂03-May-82  2016	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	more timing results    
C00674 00174	∂04-May-82  0021	RPG  	Warning! Extreme Danger Ahead!!   
C00676 00175	∂04-May-82  1259	RPG  	Warning!!! Many Bits Below!!!
C00700 00176	∂04-May-82  1308	RPG  	Barrow FFT    
C00706 00177	∂04-May-82  1317	RPG  	More Info on FFT   
C00707 00178	∂06-May-82  0128	Mabry Tyson <Tyson at SRI-AI> 	MAS results for UCI Lisp
C00709 00179	∂06-May-82  0129	Mabry Tyson <Tyson at SRI-AI> 	FRPOLY results for UCI Lisp  
C00712 00180	∂06-May-82  0128	Mabry Tyson <Tyson at SRI-AI> 	TAK results for UCI Lisp
C00714 00181	∂06-May-82  2022	Kim.fateman at Berkeley 	polynomial benchmark, translated to interlisp
C00725 00182	∂03-May-82  2027	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	Puzzle benchmark  
C00727 00183	∂04-May-82  0208	JONL at PARC-MAXC 	Barrow's MacLISP version of FFT
C00732 00184	∂07-May-82  1956	MASINTER at PARC-MAXC 	Interlisp-10 FFT timings   
C00740 00185	∂07-May-82  2142	MASINTER at PARC-MAXC 	archives for LispTranslators@SU-AI   
C00741 00186	∂07-May-82  2159	MASINTER at PARC-MAXC 	Interlisp-10 TAK timings   
C00742 00187	∂08-May-82  1032	MASINTER at PARC-MAXC 	A note of warning in doing Interlisp-10 timings...  
C00745 00188	∂08-May-82  2132	Kim.jkf at Berkeley 	mas benchmark 
C00746 00189	∂08-May-82  2148	Kim.jkf at Berkeley 	updated mas benchmark results
C00747 00190	∂10-May-82  1000	RPG  	MAS Benchmark 
C00748 00191	∂10-May-82  1917	Mabry Tyson <Tyson at SRI-AI> 	MAS timings for UCI-Lisp
C00749 00192	∂10-May-82  2101	Mabry Tyson <Tyson at SRI-AI> 	UCI Lisp on Basket Puzzle    
C00750 00193	∂11-May-82  1457	Mabry Tyson <Tyson at SRI-AI> 	Lisp timings  
C00756 00194	∂11-May-82  1546	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	UCI Lisp    
C00759 00195	∂12-May-82  0003	Mabry Tyson <Tyson at SRI-AI> 	UCI-Lisp timing on Barrow FFT
C00761 00196	∂12-May-82  0003	Mabry Tyson <Tyson at SRI-AI> 	Note on UCI-Lisp timings on 2060  
C00763 00197	∂06-May-82  1750	Kim.fateman at Berkeley 	here's the code
C00767 00198	∂06-May-82  2009	Kim.fateman at Berkeley 	that fft program in C    
C00768 00199	∂06-May-82  1646	Kim.fateman at Berkeley 	fft benchmark  
C00770 00200	∂25-May-82  0907	jkf@ucbkim at Berkeley 	frpoly benchmark, complete results  
C00775 00201	∂22-May-82  2336	Martin.Griss <Griss at UTAH-20> 	MAS times   
C00776 00202	∂21-May-82  2048	Martin.Griss <Griss at UTAH-20> 	Latest PSL Tak times  
C00778 00203	∂02-Jul-82  0007	RPG  	Lack of Response   
C00780 00204	∂06-Jul-82  1539	RPG  	Symbolic Derivative Benchmark
C00783 00205	∂06-Jul-82  1605	RPG  	Symbolic Derivative (2) 
C00788 00206	∂06-Jul-82  1613	RPG  	Symbolic Derivative (2) 
C00793 00207	∂06-Jul-82  1630	RPG  	Symbolic Derivative (3) 
C00798 00208	∂06-Jul-82  1634	RPG  	Progress 
C00806 00209	∂06-Jul-82  1703	Kim.fateman at Berkeley 	Re:  Progress  
C00807 00210	∂06-Jul-82  1724	ARPAVAX.fateman at Berkeley   
C00808 00211	∂06-Jul-82  1724	Kim.fateman at Berkeley 	benchmark 3    
C00819 00212	∂06-Jul-82  1740	ARPAVAX.fateman at Berkeley   
C00820 00213	∂06-Jul-82  1802	Kim.fateman at Berkeley 	deriv
C00821 00214	∂06-Jul-82  2047	Kim.fateman at Berkeley 	fft benchmark? 
C00830 00215	∂06-Jul-82  1739	Mabry Tyson <Tyson at SRI-AI> 	Re: Progress  
C00833 00216	∂07-Jul-82  1140	RPG  	FFT 
C00834 00217	∂07-Jul-82  1319	Kim.fateman at Berkeley 	Re:  FFT  
C00837 00218	∂07-Jul-82  1811	Kim.fateman at Berkeley 	Re:  FFT  
C00838 00219	∂13-Jul-82  2329	RPG  	Errors   
C00839 00220	∂13-Jul-82  2348	Mabry Tyson <Tyson at SRI-AI> 	Re: Errors    
C00840 00221	∂13-Jul-82  1817	Mabry Tyson <Tyson at SRI-AI> 	Re: Symbolic Derivative (2)  
C00841 00222	∂16-Jul-82  0012	Mabry Tyson <Tyson at SRI-AI> 	DERIV, DDERIV, FDDERIV results    
C00846 00223	∂19-Jul-82  0615	ACORREIRA at BBNA 	address change  
C00847 00224	∂09-Jul-82  2103	Martin.Griss <Griss at UTAH-20> 	Latest TAK #'s   
C00848 00225	∂16-Jul-82  0012	Mabry Tyson <Tyson at SRI-AI> 	DERIV, DDERIV, FDDERIV results    
C00853 00226	∂18-Jul-82  0719	Martin.Griss <Griss at UTAH-20> 	[Martin.Griss <Griss at UTAH-20>: MAS times]   
C00854 00227	∂23-Jul-82  1519	Howard I. Cannon <HIC at SCRC-TENEX at MIT-MC> 	Timings for Symbolics LM-2 
C00860 00228	∂10-Aug-82  1605	RPG  	MAS/TAKL 
C00862 00229	∂04-Aug-82  1052	HIC at SCRC-TENEX 	[DLA: forwarded]
C00864 00230	∂27-Aug-82  1034	Masinter at PARC-MAXC 	old benchmarks out of the past  
C00874 00231	∂30-Sep-82  1218	James Bennett <csd.Bennett at SU-SCORE> 	new timings   
C00876 00232	∂03-Oct-82  2050	James Bennett <csd.Bennett at SU-SCORE> 	initial timings    
C00879 00233	∂06-Oct-82  1907	James Bennett <csd.Bennett at SU-SCORE> 	bug in puzzle 
C00883 00234	∂09-Oct-82  0413	JonL at PARC-MAXC 	Order of magnitude on PUZZLE   
C00885 00235	∂05-Oct-82  1352	Masinter at PARC-MAXC 	a parser benchmark    
C00914 00236	∂09-Nov-82  0853	GBROWN at DEC-MARLBORO 	WHETSTONE BENCHMARK  
C00931 00237	∂10-Nov-82  0903	Friedland 	D0 memory benchmark
C00934 00238	∂28-Nov-82  2251	CL.BOYER at UTEXAS-20 	FRANZ AND REWRITE
C00937 00239	∂31-Jan-83  1751	RPG  
C00938 00240	∂12-Mar-83  1544	BROOKS%MIT-OZ@MIT-MC 	[George J. Carrette <GJC @ MIT-ML>: 750 floating-point accelerator] 
C00941 00241	∂14-Mar-83  2359	TYSON@SRI-AI 	Lisp timings    
C00944 00242	∂26-May-83  1134	SCHMIDT@SUMEX-AIM 	extra 1100 memory benchmark    
C00947 00243	∂26-May-83  1620	Masinter.PA%PARC-MAXC.ARPA@SUMEX-AIM 	Relation of real memory to performance in a demand paging
C00952 00244	∂24-May-83  1344	PW  	Benchmark update    
C00954 00245	∂31-May-83  1117	RPG   	traverse benchmark
C00957 00246	∂01-Jun-83  2314	RPG   	boyer   
C00959 00247	∂06-Jun-83  0956	RPG   	Relation of real memory to performance in a demand paging 
C00964 00248	∂06-Jun-83  0958	RPG   	Re:  Franz on the Sun  
C00967 00249	∂06-Jun-83  1011	RPG  
C00969 00250	∂06-Jun-83  1015	RPG   	Re: Benchmarking  
C00972 00251	 Cull from here
C00974 00252	∂06-Jun-83  1035	RPG   	traverse benchmark
C00977 00253	∂06-Jun-83  1039	RPG   	boyer   
C00979 00254	∂06-Jun-83  1040	RPG   	boyer   
C00981 00255	∂16-Jun-83  0945	RPG  
C00984 00256	∂16-Jun-83  0945	RPG   	[fateman%UCBKIM@Berkeley (Richard Fateman): "macsyma on a chip?"]   
C00987 00257	∂16-Jun-83  1816	RPG   	Re: Varia    
C00991 00258	∂26-Jun-83  1125	RPG  	Timings  
C00994 00259	∂27-Jun-83  0934	RPG   	Re: BenchMarking  
C00995 00260	∂29-Jun-83  1037	RPG   	Benchmarking 
C00997 00261	∂29-Jun-83  1038	RPG   	Benchmarking 
C01000 00262	∂29-Jun-83  1049	RPG   	Re: BenchMarking  
C01002 00263	 ∂18-Jul-83  2228	RPG   	effect of compiler optimizations on benchmarks  
C01005 00264	∂19-Jul-83  1214	@MIT-MC:BEE%SCRC-TENEX%MIT-MC@SU-DSN 	Benchmark results
C01013 00265	∂20-Jul-83  0621	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Comparitive benchmarking   
C01020 00266	∂21-Jul-83  0657	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Comparative Benchmarks     
C01025 00267	∂22-Jul-83  0013	MASINTER.PA@PARC-MAXC.ARPA 	various
C01028 00268	∂22-Jul-83  0358	@MIT-MC:DLW@SCRC-TENEX 	Using our figures for Stanford 
C01032 00269	∂05-Aug-83  2212	GSB@MIT-ML 	NIL Benchmarks    
C01065 00270	∂09-Oct-83  0922	jkf@ucbkim 	results 
C01067 00271	∂09-Oct-83  0937	jkf@ucbkim 	boyer   
C01075 00272	∂09-Oct-83  0939	jkf@ucbkim 	ctak    
C01083 00273	∂09-Oct-83  0946	jkf@ucbkim 	browse  
C01092 00274	∂09-Oct-83  0957	jkf@ucbkim 	destru  
C01101 00275	∂09-Oct-83  1121	jkf@ucbkim 	div2    
C01115 00276	∂09-Oct-83  1122	jkf@ucbkim 	fprint  
C01123 00277	∂09-Oct-83  1346	jkf@ucbkim 	fread   
C01131 00278	∂09-Oct-83  1349	jkf@ucbkim 	stak    
C01139 00279	∂10-Oct-83  0746	jkf@ucbkim 	takl    
C01147 00280	Wow!
C01148 00281	∂12-Oct-83  0715	jkf%ucbkim@Berkeley 	tprint   
C01163 00282	∂15-Oct-83  1041	jkf@ucbkim 	benchmarks   
C01177 00283	∂08-Oct-83  1946	JONL.PA@PARC-MAXC.ARPA 	fyi   
C01192 00284	∂15-Oct-83  1227	jkf@ucbkim 	Re: CPU time 
C01194 00285	To JKF on Nov 30, 1983
C01195 00286	∂09-Nov-83  2240	SCHOEN@SUMEX-AIM.ARPA 	Performance comparisons on D-machines with expert systems
C01201 00287	∂28-Nov-83  2154	SCHOEN@SUMEX-AIM.ARPA 	Dolphin-Dandelion performance comparison  
C01204 00288	∂08-Dec-83  1241	CL.BOYER@UTEXAS-20.ARPA 	MREWRITE on 3600s Release 5.0 
C01208 00289	∂26-Jan-84  1048	WVANROGGEN@DEC-MARLBORO.ARPA 	common lisp benchmarks   
C01212 00290	∂23-Jan-84  1344	@SUMEX-AIM.ARPA:VANBUER@USC-ECL 	performance of transcendental functions in Interlisp
C01221 00291	∂19-Jan-84  1643	MASINTER.PA@PARC-MAXC.ARPA 	varia  
C01223 00292	∂28-Dec-83  0843	@SUMEX-AIM.ARPA:VANBUER@USC-ECL 	Re: Why so slow???    
C01233 00293	∂16-Dec-83  1457	PW  	LispM evaluation issues  
C01235 00294	∂21-Dec-83  2321	GSB@MIT-ML 	benchmarking 
C01240 00295	∂29-Jan-84  1526	WVANROGGEN@DEC-MARLBORO.ARPA 	more on Vax Lisp timings 
C01244 00296	∂08-Feb-84  0217	JonL.pa@PARC-MAXC.ARPA 	Fourth Attempt! 
C01248 00297	∂26-Jan-84  2117	JONL.PA@PARC-MAXC.ARPA 	fyi   
C01254 00298	∂27-Jan-84  1637	WVANROGGEN@DEC-MARLBORO.ARPA 	Common Lisp timings 
C01256 00299	∂27-Jan-84  1639	WVANROGGEN@DEC-MARLBORO.ARPA 	DERIV time
C01257 00300	∂29-Jan-84  1526	WVANROGGEN@DEC-MARLBORO.ARPA 	more on Vax Lisp timings 
C01261 00301	∂04-Feb-84  0953	fateman%ucbdali@Berkeley 	favor    
C01264 00302	∂06-Feb-84  1214	ROD   	Re:  T and franz  
C01269 00303	∂08-Feb-84  0217	JonL.pa@PARC-MAXC.ARPA 	Fourth Attempt! 
C01273 00304	∂09-Feb-84  1325	KESSLER@UTAH-20.ARPA 	Re: Benchmarks    
C01275 00305	∂10-Feb-84  0032	JONL.PA@PARC-MAXC.ARPA 	Re: Results
C01277 00306	∂21-Feb-84  1341	RBATES@USC-ISIB 	Re: Benchmarks    
C01283 00307	∂21-Feb-84  2152	RBATES@USC-ISIB 	Re: By the way    
C01285 00308	∂15-Feb-84  2218	ROD  	lisp timings in AI list.
C01303 00309	∂22-Feb-84  0705	ROACH@RUTGERS.ARPA 	Re: Benchmarks      
C01306 00310	∂23-Feb-84  1006	RBATES@USC-ISIB 	Re: By the way    
C01311 00311	∂12-Mar-84  2039	KESSLER@UTAH-20.ARPA 	Re: Message? 
C01472 00312	∂23-Mar-84  2335	GALWAY@UTAH-20.ARPA 	Latest PSL times on DEC-20   
C01497 00313	∂30-Apr-84  1241	KESSLER@UTAH-20.ARPA 	Cray timings 
C01566 00314	∂30-Apr-84  1402	KESSLER@UTAH-20.ARPA 	Benchmarks   
C01572 00315	∂14-May-84  1250	KESSLER@UTAH-20.ARPA 	Vax timings  
C01574 00316	∂14-May-84  1251	KESSLER@UTAH-20.ARPA 	750 timing tests  
C01609 00317	∂14-May-84  1253	KESSLER@UTAH-20.ARPA 	780 timing tests  
C01641 00318	∂15-May-84  0921	Cassels@SCRC-STONY-BROOK.ARPA 	FFT benchmark 
C01643 00319	∂15-May-84  1220	KESSLER@UTAH-20.ARPA 	DN600   
C01668 00320	∂15-May-84  1219	KESSLER@UTAH-20.ARPA 	[John W. Peterson <JW-Peterson@UTAH-20.ARPA>: First crack at timing apollo tests]  
C01672 00321	∂15-May-84  1220	KESSLER@UTAH-20.ARPA 	Dn300   
C01695 00322	∂18-May-84  1212	jkf%ucbmike@Berkeley 	benchmarks   
C01696 00323	∂18-May-84  1215	jkf%ucbkim@Berkeley 
C01700 00324	∂18-May-84  1215	jkf%ucbkim@Berkeley 
C01706 00325	∂18-May-84  1216	jkf%ucbkim@Berkeley 
C01710 00326	∂18-May-84  1215	jkf%ucbkim@Berkeley 
C01714 00327	∂22-May-84  1152	EJG@S1-A.ARPA 	DESTRUCTIVE better & better   
C01715 00328	∂23-May-84  1640	EJG@S1-A.ARPA 	S-1 TAKL time  
C01717 00329	∂01-Jun-84  1521	CL.BOYER@UTEXAS-20.ARPA 	[Bill Murray <ATP.Murray@UTEXAS-20.ARPA>: Dandelion/3600 Benchmark Results]
C01726 00330	∂12-Jun-84  0006	EJG@S1-A.ARPA 	S-1 DERIV time 
C01728 00331	∂09-Jun-84  0114	EJG@S1-A.ARPA 	S-1 STAK time  
C01729 00332	∂18-Jun-84  2043	GSB@MIT-ML 	timings 
C01733 00333	∂26-Jun-84  1323	vanroggen%bach.DEC@decwrl.ARPA 	vax lisp on 11/7xx
C01740 00334	∂26-Jun-84  1324	vanroggen%bach.DEC@decwrl.ARPA 	vax lisp vs. nil/franz 
C01747 00335	∂19-May-84  0122	GJC@MIT-MC
C01750 00336	∂11-Jul-84  1750	EJG  	S-1 CTAK time 
C01751 00337	∂12-Jul-84  1851	RPG  
C01752 00338	∂17-Jul-84  0738	KESSLER@UTAH-20.ARPA 	Timing Report
C01755 00339	∂17-Jul-84  0156	EJG  	New S-1 PUZZLE time
C01756 00340	∂20-Jul-84  0807	KESSLER@UTAH-20.ARPA 	[jwa@lanl: combined times]  
C01764 00341	∂25-Jul-84  1804	GRISS%hp-hulk.csnet@csnet-relay.arpa 	Our uptodate table    
C01771 00342	∂27-Jul-84  1151	JIM@CMU-CS-C.ARPA 	Perq Lisp bench marks
C01866 00343	∂29-Jul-84  1355	JW-PETERSON@UTAH-20.ARPA 	Timing tests for DSP-160
C01882 00344	∂16-Aug-84  0949	RBATES@USC-ISIB.ARPA 	Lisp    
C01891 00345	∂27-Jul-84  1902	mab@aids-unix 	Lisp Benchmarks
C01894 00346	∂01-Aug-84  0830	DLW@SCRC-STONY-BROOK.ARPA 	Gentlemen:        
C01898 00347	∂03-Aug-84  2140	MCDONALD@SU-SCORE.ARPA 	timing tests    
C01901 00348	∂03-Aug-84  2331	EJG@S1-A.ARPA 	S-1 Lisp BROWSE time!!   
C01902 00349	∂09-Aug-84  1127	fateman%ucbdali@Berkeley 	benchmarks and rpg. <flame on>    
C01905 00350	∂09-Aug-84  1315	pmk@cmu-ri-isl2.arpa 	Lisp performance benchmarks 
C01907 00351	∂15-Aug-84  1344	mab@aids-unix 	Re: Benchmarks 
C01911 00352	∂15-Aug-84  1653	sdcsvax!steve@sdccsu3 	looking for lisp performance evaluation results
C01915 00353	∂15-Aug-84  1920	TYSON@SRI-AI.ARPA 	Timings of LMI  
C01919 00354	∂16-Aug-84  0949	RBATES@USC-ISIB.ARPA 	Lisp    
C01928 00355	∂16-Aug-84  1257	DLW@SCRC-STONY-BROOK.ARPA 	Mutually-clobbering benchmarks   
C01930 00356	∂17-Aug-84  1421	JAM@SU-SCORE.ARPA 	Lisp machine benchmarks   
C01932 00357	∂21-Aug-84  1500	MEEHAN@YALE.ARPA 	Benchmarks  
C01936 00358	∂28-Aug-84  0400	MCDONALD@SU-SCORE.ARPA 	IBM timing data 
C01942 00359	∂04-Sep-84  1905	GJC@MIT-MC 	preliminary benchmark results    
C01947 00360	∂06-Sep-84  1006	KESSLER@UTAH-20.ARPA 	[jwa@LANL (Wayne Anderson): benchmarks with 300,000 wd heap]   
C01971 00361	∂12-Sep-84  0824	DLW@SCRC-STONY-BROOK.ARPA 	Final 3600 numbers
C01978 00362	∂18-Sep-84  0959	BEE@SCRC-QUABBIN.ARPA 	Instruction cache     
C01984 00363	∂25-Sep-84  2158	DABROWN@USC-ECL.ARPA
C01985 00364	∂30-Sep-84  0222	GSB@MIT-MC 	nil internals description   
C02015 00365	∂30-Sep-84  2349	WHOLEY@CMU-CS-C.ARPA 	Implementation details 
C02025 00366	∂08-Oct-84  1158	KESSLER@UTAH-20.ARPA 	PSL on 750/VMS    
C02097 00367	∂08-Oct-84  1158	KESSLER@UTAH-20.ARPA 	PSL on 750/VMS    
C02121 00368	∂08-Oct-84  1328	KESSLER@UTAH-20.ARPA 	Timing Report
C02137 00369	∂10-Oct-84  0907	jrg@cmu-cs-spice.arpa 	UCLA Perq   
C02140 00370	∂29-Oct-84  1537	vanroggen%bach.DEC@decwrl.ARPA 	VAX 8600 times & other info 
C02143 00371	∂29-Oct-84  1614	vanroggen%bach.DEC@decwrl.ARPA 	Re: Times    
C02145 00372	∂30-Oct-84  1233	vanroggen%bach.DEC@decwrl.ARPA 	timing problems   
C02147 00373	∂01-Nov-84  0941	jwa@LANL 	table discrepancy   
C02149 00374	∂31-Oct-84  1204	vanroggen%bach.DEC@decwrl.ARPA 	new 8600 timings  
C02150 00375	∂11-Oct-84  1346	DLW@SCRC-STONY-BROOK.ARPA 	Benchmark results 
C02152 00376	∂12-Oct-84  1435	JonL.pa@Xerox.ARPA 	Interlisp-D machine desrciptions   
C02163 00377	∂12-Nov-84  1406	@MIT-MC:KHS@MIT-OZ 	LMI benchmark update
C02179 00378	∂21-Nov-84  0249	GSB@MIT-MC 	timings 
C02182 00379	∂07-Mar-85  2034	KESSLER@UTAH-20.ARPA 	Sun TImings  
C02209 00380	∂08-Mar-85  1202	kessler%utah-orion@utah-cs 	Timing Report    
C02212 00381	∂12-Mar-85  1820	DLW@SCRC-STONY-BROOK.ARPA 	Varia        
C02216 00382	∂12-Mar-85  0557	DLW@SCRC-RIVERSIDE.ARPA 	Benchmarks
C02224 00383	∂20-Mar-85  1411	DLW@SCRC-STONY-BROOK.ARPA 	Times        
C02226 00384	∂28-Mar-85  1012	OLDMAN@USC-ISI.ARPA 	DG Lisp Benchmarks 
C02248 00385	∂25-Apr-85  1511	fateman%ucbdali@Berkeley 	Re:  Is MIT publishing your paper as a BOOK??    
C02251 00386	∂25-Apr-85  1523	fateman%ucbdali@Berkeley 	RPG's benchmarks coming out as an MIT press book 
C02253 00387	∂25-Apr-85  1750	CL.BOYER@UTEXAS-20.ARPA 	copyright 
C02257 00388	∂26-Apr-85  1102	vanroggen@DEC-HUDSON 	quote & times
C02260 00389	∂26-Apr-85  1304	vanroggen@DEC-HUDSON 	dderiv  
C02261 00390	∂28-Apr-85  1524	WHOLEY@CMU-CS-C.ARPA 	Timings      
C02266 00391	∂15-May-85  1302	GJC@MIT-MC 	Update to LMI-LAMBDA timing figures...
C02268 00392	∂10-May-85  2002	franz!layer@Berkeley 	benches 
C02275 ENDMK
C⊗;
∂27-Feb-81  1334	Deutsch at PARC-MAXC 	Re: Timings  
Date: 27 Feb 1981 13:32 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timings
In-reply-to: RPG's message of 27 Feb 1981 1319-PST
To: Dick Gabriel <RPG at SU-AI>
cc: info-lispm at MIT-AI

Your suggestion sounds great.  What we need is someone to organize the process
just a little.  Such a person would do something like the following:

1) Collect the names of volunteers or contact persons at each site, to send sample
programs to.
2) Collect the sample programs from each site, and disseminate them to the
volunteers or contacts at the other sites.
3) Collect the translated sample programs (in case there was controversy over
whether the translation was "trivial", for example, and for documentation and
historical purposes).
4) Collect the results of the timings run at each site, and disseminate them.

Would you like to volunteer?

∂27-Feb-81  1342	Dick Gabriel <RPG at SU-AI> 	Timings    
Date: 27 Feb 1981 1319-PST
From: Dick Gabriel <RPG at SU-AI>
Subject: Timings  
To:   deutsch at PARC-MAXC
CC:   info-lispm at MIT-AI    

	Since everyone I know of is trying to make a decision about what
to do about Lisp computing in the next five years, perhaps we should
try to co-ordinate a test that will help everyone make a decision.
For instance, each center (PARC, MIT, Stanford, CMU, Berkeley,...)
can provide a program that is of interest to them (not too big, of course);
each test site will then provide someone to re-code (in a very trivial sense:
turning greaterp into >, adding declarations) those programs into reasonably
efficient code for their system. The authors will provide timing data and
timing points in their code.

	Each center may have a few programs since they may have diverse
communities (SAIL and HPP at Stanford). I would be happy to volunteer to
test programs for SAIL MacLisp, which is a 10 version.
			-rpg-



∂27-Feb-81  1354	RPG  	Timings  
To:   deutsch at PARC-MAXC
CC:   RPG at SU-AI, info-lispm at MIT-AI
I will volunteer to co-ordinate the Lisp timing test. I plan to contact:

	Deutsch/Masinter at Parc (InterLisp on MAXC, Dorado, Dolphin...)
	RPG/ROD at SAIL (MacLisp on SAIL, TOPS-20, FOONLY F2)
	VanMelle@SUMEX (InterLisp on TOPS-20)
	Fateman at Berkeley (FranzLisp on VAX)
	Hedrick at Rutgers (UCILISP on TOPS-10?)
	Fahlman/Steele at CMU (SPICELISP on ?, MacLisp on CMU-10)
	HIC at MIT (Lisp Machine)
	JONL at MIT (MacLisp on ITS, NIL on VAX)
	Westfold at SCI (InterLisp on F2)
	Weyhrauch at SAIL (Ilisp on SAIL, LISP1.6 on SAIL)

If anyone has any suggestions about who else to contact or other Lisps
and/or machines to try, let me know soon.

				-rpg-

∂27-Feb-81  1412	Bruce E. Edwards <BEE at MIT-AI> 	Re: timings
Date: 27 February 1981 16:32-EST
From: Bruce E. Edwards <BEE at MIT-AI>
Subject: Re: timings
To: CPR at MIT-EECS
cc: INFO-LISPM at MIT-AI, RWS at MIT-XX

As Peter Deutsch has pointed out this is a crummy benchmark, which was implemented
by relatively unenlightened programming on the CADR. I made it almost 50% faster
in 5 minutes, and the new numbers are much better. They could be made much better,
but basically people aren't interested in hacking uninteresting benchmarks. Things
like a natural language parser or an AI program is more what we are interested in.
There are some data points along this line, but I can't remember the exact numbers.
Hopefully RG has the numbers for the WOODS lunar program tucked away somewhere.

∂27-Feb-81  1427	Deutsch at PARC-MAXC 	Re: Timings  
Date: 27 Feb 1981 14:26 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timings
In-reply-to: RPG's message of 27 Feb 1981 1354-PST
To: Dick Gabriel <RPG at SU-AI>

Great!  Perhaps we will finally throw some light into the murk of claims and
counter-claims about Lisp speeds that have been made for many years.

You might consider sending out some kind of announcement to LISP-FORUM
and/or LISP-DISCUSSION at MIT-AI as well -- I'm not sure everyone of interest
is on INFO-LISPM.

∂27-Feb-81  1502	Deutsch at PARC-MAXC 	Re: Timings  
Date: 27 Feb 1981 13:32 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timings
In-reply-to: RPG's message of 27 Feb 1981 1319-PST
To: Dick Gabriel <RPG at SU-AI>
cc: info-lispm at MIT-AI

Your suggestion sounds great.  What we need is someone to organize the process
just a little.  Such a person would do something like the following:

1) Collect the names of volunteers or contact persons at each site, to send sample
programs to.
2) Collect the sample programs from each site, and disseminate them to the
volunteers or contacts at the other sites.
3) Collect the translated sample programs (in case there was controversy over
whether the translation was "trivial", for example, and for documentation and
historical purposes).
4) Collect the results of the timings run at each site, and disseminate them.

Would you like to volunteer?


∂27-Feb-81  1533	Dick Gabriel <RPG at SU-AI> 	Timings    
Date: 27 Feb 1981 1354-PST
From: Dick Gabriel <RPG at SU-AI>
Subject: Timings  
To:   deutsch at PARC-MAXC
CC:   RPG at SU-AI, info-lispm at MIT-AI

I will volunteer to co-ordinate the Lisp timing test. I plan to contact:

	Deutsch/Masinter at Parc (InterLisp on MAXC, Dorado, Dolphin...)
	RPG/ROD at SAIL (MacLisp on SAIL, TOPS-20, FOONLY F2)
	VanMelle@SUMEX (InterLisp on TOPS-20)
	Fateman at Berkeley (FranzLisp on VAX)
	Hedrick at Rutgers (UCILISP on TOPS-10?)
	Fahlman/Steele at CMU (SPICELISP on ?, MacLisp on CMU-10)
	HIC at MIT (Lisp Machine)
	JONL at MIT (MacLisp on ITS, NIL on VAX)
	Westfold at SCI (InterLisp on F2)
	Weyhrauch at SAIL (Ilisp on SAIL, LISP1.6 on SAIL)

If anyone has any suggestions about who else to contact or other Lisps
and/or machines to try, let me know soon.

				-rpg-



∂27-Feb-81  1616	Earl A. Killian <EAK at MIT-MC> 	Timings     
Date: 27 February 1981 19:16-EST
From: Earl A. Killian <EAK at MIT-MC>
Subject:  Timings  
To: RPG at SU-AI

I've got a queuing simulation program in MC:EAK;SIMUL > that
while it isn't at all typical of AI, uses an interesting mix of
list and numeric computation, and also runs for a fair time while
being not overly long.  I'm not sure whether its useful to you,
but if it is, let me know.

∂27-Feb-81  1615	George J. Carrette <GJC at MIT-MC> 	timings  
Date: 27 February 1981 17:35-EST
From: George J. Carrette <GJC at MIT-MC>
Subject:  timings
To: Deutsch at PARC-MAXC
cc: INFO-LISPM at MIT-MC, masinter at PARC-MAXC, guttag at MIT-XX,
    RWS at MIT-XX

How about using Macsyma? It has some interesting programs in it,
and it has given the Lispmachine quite a work-out on some large
real problems (or did the Lispmachine give macsyma a work out?).

-gjc


∂27-Feb-81  1655	David.Neves at CMU-10A 	Re: Timings
Date: 27 February 1981 1954-EST (Friday)
From: David.Neves at CMU-10A
To: Dick Gabriel <RPG at SU-AI> 
Subject:  Re: Timings
In-Reply-To:  Dick Gabriel's message of 27 Feb 81 16:54-EST
Message-Id: <27Feb81 195427 DN10@CMU-10A>

why not also try TLC lisp on a micro.  ask jra@sail
also BBN's jerico might be relevant but i don't think they
	have a lisp for it.

∂27-Feb-81  1658	David.Neves at CMU-10A 	Re: Timings
Date: 27 February 1981 1957-EST (Friday)
From: David.Neves at CMU-10A
To: Dick Gabriel <RPG at SU-AI> 
Subject:  Re: Timings
In-Reply-To:  Dick Gabriel's message of 27 Feb 81 16:54-EST
Message-Id: <27Feb81 195751 DN10@CMU-10A>

p.s.  also i believe people at BBN are trying to put Interlisp on
 a Prime computer.  If they do have a version up that would be a
 another data point.  i don't know who you would contact though.

∂27-Feb-81  1710	CSVAX.fateman at Berkeley 	Timings 
Date: 27 Feb 1981 16:20:26-PST
From: CSVAX.fateman at Berkeley
To: RPG@SU-AI, deutsch@PARC-MAXC
Subject: Timings
Cc: info-lispm@mit-ai

add Griss@utah-20 (standard lisp on 10, b-1700, ...)

∂27-Feb-81  1719	CSVAX.fateman at Berkeley 	Timings 
Date: 27 Feb 1981 16:22:33-PST
From: CSVAX.fateman at Berkeley
To: RPG@SU-AI, deutsch@PARC-MAXC
Subject: Timings
Cc: info-lispm@mit-ai

add Griss@utah-20 (standard lisp on 10, b-1700, ...)

∂27-Feb-81  1730	CSVAX.fateman at Berkeley 	timings 
Date: 27 Feb 1981 16:43:27-PST
From: CSVAX.fateman at Berkeley
To: Deutsch@PARC-MAXC, GJC@MIT-MC
Subject: timings
Cc: INFO-LISPM@MIT-MC, RWS@MIT-XX, CSVAX.fateman@Berkeley, guttag@MIT-XX, masinter@PARC-MAXC

George: are you offering to put Macsyma up in Interlisp?  We already
have some LM /KL-10/ VAX-11/780 benchmarks (KL-10 maclisp):
Vaxima and Lisp Machine timings for DEMO files
(fg genral, fg rats, gen demo, begin demo)
(garbage collection times excluded.)  Times in seconds.

MC        VAXIMA     128K lm     192K lm    256K lm VAXIMA Jul 80
4.119	   17.250   43.333      19.183     16.483    15.750
2.639	    7.016   55.916      16.416     13.950  
3.141	   10.850  231.516      94.933     58.166   
4.251	   16.700  306.350     125.666     90.716    12.400

(Berkeley CS.VAX 11/780 UNIX April 8, 1980,  KL-10 MIT-MC ITS April 9, 1980.)

improvements due to expanding alike1 and a few odds and ends as macros;
also some improvements in the compiler.


∂27-Feb-81  1947	George J. Carrette <GJC at MIT-MC> 	Timings  
Date: 27 February 1981 22:47-EST
From: George J. Carrette <GJC at MIT-MC>
Subject:  Timings  
To: RPG at SU-AI
cc: deutsch at PARC-MAXC

I have a usefull benchmark which I just tried in Maclisp at MIT-MC
and on a LISPM. It is code which does line-drawing window-clipping 
for arbitrary convex polygonal regions. This code is in actual use.
If you want to see it, it is on MIT-MC in
[MC:BLIS11;CLIP >]. (yes, I hack BLISS. (wow what a compiler!))
It is a nice example because it tests the speed of the FUNCALL dispatch.
The file is conditionalized to run in either LISPM or Maclisp, and
even includes the timing methods used. I would very much like it
if I could run the same (*exactly*) conditionalized source on
N different systems, that way I would have
(1) greater confidence
(2) an exact knowledged of how things are done differently on the
    different systems. e.g. how much hair one has to go through to
    declare things to the compiler.

-gjc

∂27-Feb-81  2002	Howard I. Cannon <HIC at MIT-MC> 	Timings    
Date: 27 February 1981 23:02-EST
From: Howard I. Cannon <HIC at MIT-MC>
Subject:  Timings  
To: RPG at SU-AI

I'll be happy to do the timing tests.
--Howard

∂27-Feb-81  2008	GYRO at MIT-ML (Scott W. Layson) 	Lisp timings    
Date: 27 FEB 1981 2306-EST
From: GYRO at MIT-ML (Scott W. Layson)
Subject: Lisp timings
To: rpg at SU-AI
CC: GYRO at MIT-ML, INFO- at MIT-ML, INFO-LISPM at MIT-ML

I know this is a little silly, but if you have any REALLY tiny
benchmarks (space-wise) I would like to try them out in TLC-Lisp
and muLisp, both running on a 64K Z-80.  These Lisps don't page,
so the program and data have to fit in small real memory.
(Perhaps I should call them "Lisplets"?)

Incidentally, it seems to me that GC time should be included in
the times reported.  Different systems generate garbage at
different rates and deal with it at different efficiencies,
and this shows up in the user-response time of the systems
(which is, after all, what we really want to know).

-- Scott Layson
---------------

∂27-Feb-81  2048	PDL at MIT-DMS (P. David Lebling) 	[Re: Timings  ]
Date: 27 Feb 1981 2348-EST
From: PDL at MIT-DMS (P. David Lebling)
To: rpg at SU-AI
In-reply-to: Message of 27 Feb 81 at 1354 PST by RPG@SU-AI
Subject: [Re: Timings  ]
Message-id: <[MIT-DMS].187847>

You should contact either CLR@MIT-XX or myself for Muddle.
	Dave


∂27-Feb-81  2057	JONL at MIT-MC (Jon L White) 	Timings for LISP benchmarks, and reminder of a proposal by Deutsch    
Date: 27 FEB 1981 2352-EST
From: JONL at MIT-MC (Jon L White)
Subject: Timings for LISP benchmarks, and reminder of a proposal by Deutsch
To: rpg at SU-AI
CC: LISP-DISCUSSION at MIT-MC, BEE at MIT-AI, JHL at MIT-AI
CC: CSVAX.fateman at BERKELEY, RWS at MIT-XX

I notice you sent your proposal to INFO-LISPM, and thought that the
LISP-DISCUSSION community might want to be aware of it too.  (Deutsch and
Masinter are, I believe, on this list, as is Griss).
    Date: 27 Feb 1981 1354-PST
    From: Dick Gabriel <RPG at SU-AI>
    I will volunteer to co-ordinate the Lisp timing test. I plan to contact:
	    Deutsch/Masinter at Parc (InterLisp on MAXC, Dorado, Dolphin...)
	    RPG/ROD at SAIL (MacLisp on SAIL, TOPS-20, FOONLY F2)
	    VanMelle@SUMEX (InterLisp on TOPS-20)
	    Fateman at Berkeley (FranzLisp on VAX)
	    Hedrick at Rutgers (UCILISP on TOPS-10?)
	    Fahlman/Steele at CMU (SPICELISP on ?, MacLisp on CMU-10)
	    HIC at MIT (Lisp Machine)
	    JONL at MIT (MacLisp on ITS, NIL on VAX)
	    Westfold at SCI (InterLisp on F2)
	    Weyhrauch at SAIL (Ilisp on SAIL, LISP1.6 on SAIL)
    If anyone has any suggestions about who else to contact or other Lisps
    and/or machines to try, let me know soon.
The contact for Rutgers-LISP should probably be JOSH@RUTGERS-10
(John Storrs Hall) who is actively extending the formerly-called UCILISP.
Fateman's login name is   CSVAX.fateman@Berkeley   unless there is some 
smarts to his mailer that I don't know about.
Also, I'd like to suggest the following additions
  GRISS@UTAH-20  for "STANDARD-LISP" on PDP10, IBM370, etc
  John Allen (who knows where?) for his "Cromemco" lisp on Z80 etc
  JHL@MIT-AI (Joachim Laubsch, from Stuttgart, West Germany) who might be 
             able to involve the European LISP community.

    I'll also send a letter of these actions to Shigeki Goto of the Nippon 
Telephone Co. in Tokyo, who generated some sort of flurry last fall with his 
incrediblly-simple "benchmark" function TAK.  Actually, TAK may be useful as 
one part of a multi-foliate benchmark, since it specifically tests timings 
for (1) function-to-function interface, and (2) simple arithmetic of GREATERP 
and SUB1.  Some of Baskett's benchmarkings score heavily on the array
capabilities, for which FORTRAN compilers "come off smelling like a rose",
and even the fast-arithmetic of MacLISP savors like a garbage dump.

   At the little "lisp discussion" held in Salt Lake City, December 1980,
(attendees partly co-incident with LISP-DISCUSSION mailing list), Peter 
Deutsch made a suggestion which we all liked, but for which there
has been no subsequent action (to my knowledge).  Basically, in order to
educate ourselves into the consequences of the differences between LISP
dialects, and to get some experience in converting "real" code, each
dialect community should nominate a representative piece of "useful code" 
from its environment, and the groups responsible for the other
dialects would try to "transport" it into their own.  Several benefits
should accrue:
  (1) If the "representative" is some useful piece of the general environment, 
      say like the DEFMACRO "cache'ing" scheme of MacLISP/NIL, or the
      Interlisp Masterscope, or whatever, then the "transportation" cost 
      will be repaid by having a useful new tool in the other dialects.
  (2) We should accumulate a library of automatic conversion tools, or
      at least of written reports on the problems involved.
  (3) Each community may be affected in a way which (hopefully) will help 
      reduce the hard-core interdialect incompatibilities.
(Apologies to Deutsch for any garbling of the proposal content).

∂27-Feb-81  2117	Howard I. Cannon <HIC at MIT-MC> 	Timings for LISP benchmarks    
Date: 28 February 1981 00:17-EST
From: Howard I. Cannon <HIC at MIT-MC>
Subject:  Timings for LISP benchmarks
To: rpg at SU-AI, deutsch at PARC-MAXC
cc: Greenberg.Symbolics at MIT-MULTICS

I suggest Greenberg.Symbolics@MIT-MULTICS for Multics MacLisp.

∂27-Feb-81  2131	CWH at MIT-MC (Carl W. Hoffman) 	Timings     
Date: 28 FEB 1981 0030-EST
From: CWH at MIT-MC (Carl W. Hoffman)
Subject: Timings  
To: RPG at SU-AI

    Date: 27 Feb 1981 1354-PST
    From: Dick Gabriel <RPG at SU-AI>

    If anyone has any suggestions about who else to contact or other Lisps
    and/or machines to try, let me know soon.

    				-rpg-

You might also contact Richard Lamson or Bernie Greenberg for timings of
MacLisp on various Multics sites.  Net addresses are "Lamson at MIT-Multics"
and "Greenberg at MIT-Multics".

∂27-Feb-81  2201	CSVAX.fateman at Berkeley 	here's a test for you to look at/ distribute    
Date: 27 Feb 1981 21:26:56-PST
From: CSVAX.fateman at Berkeley
To: rpg@su-ai
Subject: here's a test for you to look at/ distribute


;; test from Berkeley based on polynomial arithmetic.

;; Dynamic (special) variables used freely across the polynomial routines;
;; in particular *x*, u*, and v carry state between ptimes1/ptimes2/ptimes3.
(declare (special ans coef f inc i k qq ss v *x*
		    *alpha *a* *b* *chk *l *p q* u* *var *y*))
;; Declare the benchmark functions file-local so the Franz compiler can
;; emit direct (fast) calls rather than going through symbol cells.
(declare (localf pcoefadd pcplus pcplus1 pplus ptimes ptimes1
		 ptimes2 ptimes3 psimp pctimes pctimes1
		 pplus1))
;; Franz uses maclisp hackery here; you can rewrite lots of ways.
;; pointergp: order two variable symbols by their 'order property
;; (established by setup), so polynomials have a canonical main variable.
(defmacro pointergp (x y) `(> (get ,x 'order)(get ,y 'order)))

;; Representation: a coefficient is any atom (number); a polynomial is a
;; list (var e1 c1 e2 c2 ...) with exponents in decreasing order.
(defmacro pcoefp (e) `(atom ,e))
(defmacro pzerop (x) `(signp e ,x))			;true for 0 or 0.0
(defmacro pzero () 0)
;; Generic coefficient arithmetic (works on fixnums, flonums, bignums).
(defmacro cplus (x y) `(plus ,x ,y))
(defmacro ctimes (x y) `(times ,x ,y))


;; Prepend term (exponent e, coefficient c) onto term-list x,
;; dropping the term entirely when its coefficient is zero.
(defun pcoefadd (e c x) (cond ((pzerop c) x)
			      (t (cons e (cons c x)))))

;; Add coefficient c to polynomial p.  If p is itself a coefficient,
;; plain coefficient addition; otherwise fold c into p's term list.
(defun pcplus (c p) (cond ((pcoefp p) (cplus p c))
			  (t (psimp (car p) (pcplus1 c (cdr p))))))

;; Add coefficient c into term-list x (exponents descending).
;; The constant term, if any, has exponent 0 and sits at the end,
;; so we recurse past every higher-order term and combine there.
(defun pcplus1 (c x)
       (cond ((null x)
	      (cond ((pzerop c) nil) (t (cons 0 (cons c nil)))))
	     ((pzerop (car x)) (pcoefadd 0 (pplus c (cadr x)) nil))
	     (t (cons (car x) (cons (cadr x) (pcplus1 c (cddr x)))))))
	 
	 
;; Multiply polynomial p by coefficient c.  Coefficient*coefficient is
;; plain multiplication; otherwise scale every term of p's term list.
(defun pctimes (c p) (cond ((pcoefp p) (ctimes c p))
			   (t (psimp (car p) (pctimes1 c (cdr p))))))

;; Scale term-list x by coefficient c, term by term; pcoefadd drops
;; any product term whose coefficient comes out zero.
(defun pctimes1 (c x)
       (cond ((null x) nil)
	     (t (pcoefadd (car x)
			  (ptimes c (cadr x))
			  (pctimes1 c (cddr x))))))

;; Add polynomials x and y.  Dispatch on which is a bare coefficient;
;; for two polynomials, compare main variables: same variable merges
;; term lists, otherwise the higher-ordered variable (pointergp)
;; stays outermost and the other polynomial is added in as a "coefficient".
(defun pplus (x y) (cond ((pcoefp x) (pcplus x y))
			 ((pcoefp y) (pcplus y x))
			 ((eq (car x) (car y))
			  (psimp (car x) (pplus1 (cdr y) (cdr x))))
			 ((pointergp (car x) (car y))
			  (psimp (car x) (pcplus1 y (cdr x))))
			 (t (psimp (car y) (pcplus1 x (cdr y))))))

;; Merge-add two term-lists in the same variable (exponents descending).
;; Equal exponents: add coefficients (pcoefadd drops a zero sum);
;; otherwise copy the term with the larger exponent and recurse.
(defun pplus1 (x y)
       (cond ((null x) y)
	     ((null y) x)
	     ((= (car x) (car y))
	      (pcoefadd (car x)
			(pplus (cadr x) (cadr y))
			(pplus1 (cddr x) (cddr y))))
	     ((> (car x) (car y))
	      (cons (car x) (cons (cadr x) (pplus1 (cddr x) y))))
	     (t (cons (car y) (cons (cadr y) (pplus1 x (cddr y)))))))

;; Rebuild a polynomial from variable var and term-list x, simplifying
;; degenerate cases: empty list is 0, a lone constant term (exponent 0)
;; collapses to its coefficient, otherwise attach the variable header.
(defun psimp (var x)
       (cond ((null x) 0)
	     ((atom x) x)
	     ((zerop (car x)) (cadr x))
	      (t (cons var x))))

;; Multiply polynomials x and y.  Zero short-circuits; a bare
;; coefficient scales the other operand; same main variable goes to
;; the full term-list product (ptimes1); different variables treat the
;; lower-ordered polynomial as a coefficient of the higher-ordered one.
(defun ptimes (x y) (cond ((or (pzerop x) (pzerop y)) (pzero))
			  ((pcoefp x) (pctimes x y))
			  ((pcoefp y) (pctimes y x))
			  ((eq (car x) (car y))
			   (psimp (car x) (ptimes1 (cdr x) (cdr y))))
			  ((pointergp (car x) (car y))
			   (psimp (car x) (pctimes1 y (cdr x))))
			  (t (psimp (car y) (pctimes1 x (cdr y))))))

;; Multiply two term-lists.  *x*, u* and v are specials (declared above):
;; *x* walks the first operand's terms; u* holds the accumulated result
;; (first row built functionally by ptimes2, later rows merged
;; destructively into it by ptimes3); v is ptimes3's scan pointer.
(defun ptimes1 (*x* y) (prog (u* v)
			       (setq v (setq u* (ptimes2 y)))
			  a    (setq *x* (cddr *x*))
			       (cond ((null *x*) (return u*)))
			       (ptimes3 y)
			       (go a)))

;; Multiply the current term of *x* (special, set by ptimes1) by every
;; term of y, producing a fresh term-list; exponents add, coefficients
;; multiply, and zero products are dropped by pcoefadd.
(defun ptimes2 (y) (cond ((null y) nil)
			 (t (pcoefadd (plus (car *x*) (car y))
				      (ptimes (cadr *x*) (cadr y))
				      (ptimes2 (cddr y))))))

;; Destructively merge the products of the current *x* term with each
;; term of y into the accumulated result u* (specials set by ptimes1).
;; Exploits the fact that successive product exponents decrease, so v
;; (and later u) only ever advance down the result list; splicing is
;; done in place with rplaca/rplacd to avoid consing fresh result lists.
;; NOTE(review): pdiffer1 (zero-coefficient term deletion at the front)
;; is not defined anywhere in this file -- presumably supplied by the
;; surrounding system; that branch is reached only when a coefficient
;; sum cancels exactly to zero.  TODO confirm before running standalone.
(defun ptimes3 (y) 
  (prog (e u c) 
     ;; a1: fast path while products land at or before the front pointer v.
     a1 (cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))
	(setq c (ptimes (cadr y) (cadr *x*) ))
	(cond ((pzerop c) (setq y (cddr y)) (go a1))
	      ((or (null v) (> e (car v)))
	       (setq u* (setq v (pplus1 u* (list e c))))
	       (setq y (cddr y)) (go a1))
	      ((= e (car v))
	       (setq c (pplus c (cadr v)))
	       (cond ((pzerop c) (setq u* (setq v (pdiffer1 u* (list (car v) (cadr v))))))
		     (t (rplaca (cdr v) c)))
	       (setq y (cddr y))
	       (go a1)))
     ;; a: advance v past terms with exponents larger than e.
     a  (cond ((and (cddr v) (> (caddr v) e)) (setq v (cddr v)) (go a)))
	(setq u (cdr v))
     ;; b: insert or combine term (e . c) at scan pointer u.
     b  (cond ((or (null (cdr u)) (< (cadr u) e))
	       (rplacd u (cons e (cons c (cdr u)))) (go e)))
	(cond ((pzerop (setq c (pplus (caddr u) c))) (rplacd u (cdddr u)) (go d))
	      (t (rplaca (cddr u) c)))
     e  (setq u (cddr u))
     ;; d: fetch the next y term and its product with the current *x* term.
     d  (setq y (cddr y))
	(cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))
	(setq c (ptimes (cadr y) (cadr *x*)))
     ;; c: advance u to the insertion point for the new exponent e.
     c  (cond ((and (cdr u) (> (cadr u) e)) (setq u (cddr u)) (go c)))
	(go b))) 





!
















;; Raise polynomial p to integer power n by repeated squaring:
;; the do rebinds n to n/2 each iteration, squares p, and multiplies
;; the accumulator s by p whenever the current bit of n is odd.
(defun pexptsq (p n)
	(do ((n (quotient n 2) (quotient n 2))
	     (s (cond ((oddp n) p) (t 1))))
	    ((zerop n) s)
	    (setq p (ptimes p p))
	    (and (oddp n) (setq s (ptimes s p))) ))



;; Build the benchmark inputs.  Establishes the variable ordering
;; x < y < z via 'order properties, then sets three globals (free,
;; undeclared -- they become specials at top level):
;;   r  = x+y+z+1 with fixnum coefficients,
;;   r2 = 100000*r, so powers of r2 exercise bignum arithmetic,
;;   r3 = r with floating-point coefficients.
(defun setup nil
  (putprop 'x 1 'order)
  (putprop 'y 2 'order)
  (putprop 'z 3 'order)
  (setq r (pplus '(x 1 1 0 1) (pplus '(y 1 1) '(z 1 1)))) ; r= x+y+z+1
  (setq r2 (ptimes r 100000)) ;r2 = 100000*r
  (setq r3 (ptimes r 1.0)); r3 = r with floating point coefficients
  )
; time various computations of powers of polynomials, not counting
;printing but including gc time ; provide account of g.c. time.

; The following function uses (ptime) for process-time and is thus
;  Franz-specific.

;; Time (r^n, r2^n, r3^n) using Franz's (ptime), which returns a list
;; of (cpu-ticks gc-ticks) at 60 ticks/second.  Records raw readings in
;; the globals start/res1/res2/res3 and returns the three elapsed times
;; in seconds (cpu and gc separately, via b1).
(defun bench (n)
  (setq start (ptime)) ;  Franz ticks, 60 per sec, 2nd number is GC
  (pexptsq r n) 
  (setq res1 (ptime))
  (pexptsq r2 n)
  (setq res2 (ptime))
  ; this one requires bignums.
  (pexptsq r3 n)
  (setq res3 (ptime))
  (list 'power=  n (b1 start res1)(b1 res1 res2)(b1 res2 res3)))
;; Elementwise (y - x)/60: convert two (ptime) readings into elapsed seconds.
(defun b1(x y)(mapcar '(lambda(r s)(quotient (- s r) 60.0)) x y))

;instructions:
;  after loading, type (setup)
; then (bench 2) ; this should be pretty fast.
; then (bench 5)
; then (bench 10)
; then (bench 15)
;... 

∂27-Feb-81  2201	CSVAX.fateman at Berkeley 	Timings for LISP benchmarks, and reminder of a proposal by Deutsch  
Date: 27 Feb 1981 21:32:33-PST
From: CSVAX.fateman at Berkeley
To: JONL@MIT-MC, rpg@SU-AI
Subject: Timings for LISP benchmarks, and reminder of a proposal by Deutsch
Cc: BEE@MIT-AI, JHL@MIT-AI, LISP-DISCUSSION@MIT-MC

I have sent an entry (polynomial arithmetic system) to rpg@su-ai.
He can examine and redistribute.
  ( fateman@berkeley is equivalent to csvax.fateman@berkeley...)

∂28-Feb-81  0916	NEDHUE at MIT-AI (Edmund M. Goodhue) 	Timings     
Date: 28 FEB 1981 1215-EST
From: NEDHUE at MIT-AI (Edmund M. Goodhue)
Subject: Timings  
To: RPG at SU-AI

I suggest you add Jim Meehan at UCI (maintainer of UCI LISP) who can
run benchmarks on UCILISP and MLISP on both TOP-10 and TOPS-20.  UCI
is not on the net but he can be reached via MEEHAN@MIT-AI.

Ned Goodhue

∂28-Feb-81  1046	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
Date:     28 February 1981 1343-est
From:     Barry Margolin             <Margolin at MIT-Multics>
Subject:  Re: Timings
To:       RPG at SU-AI
Cc:       info-lispm at MIT-AI

I think you should also contact someone at MIT-Multics, where they run
MacLisp, although I'm not sure who you should contact.

∂28-Feb-81  1109	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
Date:     28 February 1981 1343-est
From:     Barry Margolin             <Margolin at MIT-Multics>
Subject:  Re: Timings
To:       RPG at SU-AI
Cc:       info-lispm at MIT-AI

I think you should also contact someone at MIT-Multics, where they run
MacLisp, although I'm not sure who you should contact.

∂28-Feb-81  1424	Deutsch at PARC-MAXC 	Re: Timings for LISP benchmarks, and reminder of a proposal by 
Date: 28 Feb 1981 14:23 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timings for LISP benchmarks, and reminder of a proposal by
 Deutsch
In-reply-to: JONL's message of 27 FEB 1981 2352-EST
To: rpg at SU-AI, LISP-DISCUSSION at MIT-MC, BEE at MIT-AI, JHL at MIT-AI,
 CSVAX.fateman at BERKELEY, RWS at MIT-XX

JONL accurately represented the content of my proposal.  The set of programs
being submitted for timing tests might indeed be a useful place to start.

∂28-Feb-81  1718	YONKE at BBND 	JONL's message concerning benchmarks    
Date: 28 Feb 1981 2009-EST
Sender: YONKE at BBND
Subject: JONL's message concerning benchmarks
From: YONKE at BBND
To: RPG at SU-AI, Lisp-Discussion at MIT-MC
Message-ID: <[BBND]28-Feb-81 20:09:20.YONKE>

I'd like to add Interlisp on Jericho (our in-house machine).
Also, since BBN has several different flavors of DEC hardware
which run TOPS-20, I wouldn't mind supplying these different
timings and they would probably be more informative than Kurt's
(VanMelle) from SUMEX.

Martin

∂28-Feb-81  1818	CSVAX.fateman at Berkeley 	why I excluded GC times
Date: 28 Feb 1981 17:15:23-PST
From: CSVAX.fateman at Berkeley
To: HES@MIT-AI
Subject: why I excluded GC times
Cc: CSVAX.fateman@Berkeley, info-lispm@mit-mc, lisp-discussion@mit-mc

including GC times makes for a very messy statistical situation.
GC time (or even if it happens at all) is dependent on the virtual
address space in use at the time, how much of the macsyma system
has been loaded (in the case of the KL-10), etc.  I do not know
about the LM figures, since I am only reporting stuff sent to me,
but the KL-10 and the VAX typically spend 30% additional time in
GC, averaged over various "production" runs.  Trading off GC time
for system paging time is a funny business, though I agree it
is important.


∂28-Feb-81  2014	Guy.Steele at CMU-10A 	Re: Timings 
Date: 28 February 1981 2313-EST (Saturday)
From: Guy.Steele at CMU-10A
To: Dick Gabriel <RPG at SU-AI> 
Subject:  Re: Timings
In-Reply-To:  Dick Gabriel's message of 27 Feb 81 16:54-EST
Message-Id: <28Feb81 231341 GS70@CMU-10A>

You may want to get in touch with the people at Utah (Standard LISP)
for various machines, and maybe John Allen (who has implementations
for micros, for low end of curve).

Also let me note that you are likely to get a great CACM article or
something out of distilling all this stuff if you want; more power
to you.  I'll coordinate running tests on SPice LISP, though that
may take some time to materialize.
--QW
xxx
--Q

∂28-Feb-81  2016	Scott.Fahlman at CMU-10A 	benchmarks    
Date: 28 February 1981 2315-EST (Saturday)
From: Scott.Fahlman at CMU-10A
To: rpg at su-ai
Subject:  benchmarks
Message-Id: <28Feb81 231549 SF50@CMU-10A>


Hi,
I just added my name to Lisp discussion recently and seem to have missed
something.  Exactly what benchmarks are you running/getting people to
run?  If there was a message that kicked all of this off, I would be
interested in seeing it.

We will be happy to add Spice Lisp on Perq benchmarks when the time comes,
but we won't be ready till summer.
-- Scott

∂01-Mar-81  0826	PLATTS at WHARTON-10 ( Steve Platt) 	timing for lisp   
Date:  1 Mar 1981 (Sunday) 1124-EDT
From: PLATTS at WHARTON-10 ( Steve Platt)
Subject: timing for lisp
To:   rpg at SU-AI

  ...if the systems are not *too* big, I'd like to try them on my micro
(Z80) lisp....  rough limits -- stack is a few hundred calls deep (I can
relink to change this if necessary), cell space is limited to roughly
10K cells.  Most basic major lisp functions (a la maclisp, for the most
part) are implemented, others can be added.
   -Steve

∂01-Mar-81  1300	RJF at MIT-MC (Richard J. Fateman) 	more lisp mavens   
Date:  1 MAR 1981 1600-EST
From: RJF at MIT-MC (Richard J. Fateman)
Subject: more lisp mavens
To: rpg at SU-AI

Try boyer@sri-kl.  They have an F2, and Boyer undoubtedly
could supply theorem-prover benchmark.

∂02-Mar-81  0443	Robert H. Berman <RHB at MIT-MC> 	Timings    
Date: 2 March 1981 07:43-EST
From: Robert H. Berman <RHB at MIT-MC>
Subject:  Timings  
To: RPG at SU-AI
cc: deutsch at PARC-MAXC

Please add me to your timing test survey. I have several
suggestions of features that I would like to know about.

Thanks.

--Bob

∂02-Mar-81  0543	Robert H. Berman <RHB at MIT-MC> 	Timings    
Date: 2 March 1981 08:43-EST
From: Robert H. Berman <RHB at MIT-MC>
Subject:  Timings  
To: RPG at SU-AI
cc: deutsch at PARC-MAXC

Please add me to your timing test survey. I have several
suggestions of features that I would like to know about.

Thanks.

-Bob

∂02-Mar-81  0741	James E. O'Dell <JIM at MIT-MC> 	Timings
Date: 2 March 1981 10:40-EST
From: James E. O'Dell <JIM at MIT-MC>
Subject:  Timings
To: Margolin at MIT-MULTICS
cc: RPG at SU-AI

    Date: 28 February 1981 1343-est
    From: Barry Margolin <Margolin at MIT-Multics>
    To:   RPG at SU-AI
    cc:   info-lispm at MIT-AI
    Re:   Timings

    I think you should also contact someone at MIT-Multics, where they run
    MacLisp, although I'm not sure who you should contact.

If the timings don't take too long to work up I'd be glad to run the
Multics Lisp trials. As you might know we have a Macsyma running there
now, version 293. It typically runs at .6 of a MC. The tricky thing is that
on some BIG problems it runs as fast or faster than MC because of its
larger address space. It spends less of its time collecting garbage than
on MC. I feel that this is an important factor.

At least one of the timings should CONS up a storm. We have had problems
with address space on both the LISPM and on 10's. Some large Macsyma
problems use up all of the address space on the LISPM because we don't run
with the garbage collector. GC'ing on the LISPM slows things down a lot.

I also think that the LISPM is being unfairly compared because of its
single user nature. The numbers do not accurately reflect the responsiveness
observed by the user.


∂02-Mar-81  1006	Deutsch at PARC-MAXC 	Re: Timings  
Date: 2 Mar 1981 10:06 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timings
In-reply-to: RPG's message of 27 Feb 1981 1354-PST
To: Dick Gabriel <RPG at SU-AI>
cc: Masinter

Please take me off the list of people doing Lisp timings.  Larry Masinter or
someone else at PARC who is actively working on Lisp (which I am not) is more
appropriate.

∂02-Mar-81  1312	Barry Margolin             <Margolin at MIT-Multics> 	Re: Timings
Date:     2 March 1981 1610-est
From:     Barry Margolin             <Margolin at MIT-Multics>
Subject:  Re: Timings
To:       JIM at MIT-MC
Cc:       RPG at SU-AI

Bernie Greenberg has already been volunteered to do the Multics MacLisp
timings, although I'm sure he won't mind your help, especially when it
gets to Macsyma timings.

∂02-Mar-81  1634	RPG  	Lisp Timings  
To:   info-lispm at MIT-AI, lisp-discussion at MIT-AI,
      "#TIMING.MSG[TIM,LSP]" at SU-AI
	As most of you know, there will be an attempt made to do a
series of Lisp timings in which various benchmarks submitted by the
Lisp community are tested on a variety of different Lisp systems.
Since there will need to be some translations done in order to run
these benchmarks in systems for which they were not intended, there
is the secondary (!) problem of learning what is really needed to do
these translations more readily in the future.

	I will be co-ordinating this effort and will be distributing
the results when they are in. For this purpose I have set up 3
mailing lists at Stanford:

	LISPTIMING 	 the list of people interested in this topic
	LISPTRANSLATORS, the list of people who have volunteered
			 to do the timing tests (and translations)
			 at the various sites
	LISPSOURCES	 the list of people who will be supplying
			 benchmarks

	You can MAIL to these entities at SAIL (e.g. MAIL
LISPTIMING@SAIL...)  and thus avoid swamping the mailing lists we
have been using so far.

	If you care to be on one of these lists, please send me
(rpg@sail) your login name and site exactly as your mailer will
understand it along with which list you wish to be on. If you are
supplying programs or talent, please let me know which Lisp, which
machine, and which OS you are representing.

	In addition, a list of all messages pertaining to this
extravaganza will be on TIMING.MSG[TIM,LSP] at SAIL (you can
FTP from SAIL without logging in). In general, this area will
contain all of the information, programs, and results for this
project.

	If you know of anyone who is not on the net and who may be
able to help, send me a message and a method for getting in touch
with him/her. Over the next few days I hope to establish some of the
methodological considerations (such as GC times) for the project.

			Dick Gabriel	(RPG@SAIL)

∂03-Mar-81  1524	RPG  	Lisp Timing Mailing List
To:   "@LSPTIM.DIS[P,DOC]" at SU-AI   
	Welcome to the Lisp Timing mailing list. As you may have
already guessed, the scope of the Lisp Timing Evaluation project is
very large in scope, and if we are to make a contribution to the
understanding of how to evaluate such an elusive thing as an entire
computing environment we will need to consider many methodological
issues. Since I am no expert on such evaluations I am going to require
a good deal of help, and so far more than 20 people have volunteered.

	The problems we face are not just how to measure the performance 
of these Lisp systems, but how to take a diverse set of benchmark
programs and get them to run on systems very different than those they
were written for.

	I hope at the end of this project to be able to report not
only times for programs, but descriptions of systems, translation
problems, and a general guide to the world of Lisp computing.

	The first substantive mailing will be a quick list of 
methodological points we need to consider. This list is not complete,
but aims at the directions we need to go before actual timing runs
can be performed.

	Thank you for your help in this project.

			Dick Gabriel (RPG@SAIL)

Here's the first message, which you missed:
∂03-Mar-81  1616	RPG  	Methodology considerations:  
To:   "@LSPTIM.DIS[P,DOC]" at SU-AI   
1. GC time is critical. Every timing should include CPU time
as measured by the CPU clock plus GC time. If GC time is not
accounted in the LISP, we should include a standard test, such
as a program that creates a large, standard structure (balanced
tree of some sort?) and then count CPU time on a forced GC, resulting
in a seconds/cell figure for each machine.  Maybe we should do this
in addition to the benchmarks? [In fact, measuring GC time in a meaningful
way is quite difficult due to different algorithms. Perhaps a range of
tree structures? Maybe not all algorithms are symmetric on car/cdr?]

2. Translating non-standard control structures can be a problem.
What about non-local goto's ala catch/throw? These can be simulated
with ERROR/ERRSET or with spaghetti hackery in InterLisp. These questions
should be addressed by having each translator propose various techniques 
and having the source decide on which to use. Or maybe we should use
all such methods?

3. All non-LISP syntax must be pre-expanded (i.e. CLISP) to allow
the local system to optimize as appropriate.

4. Both interpreted and compiled code will be timed.
All code will have macros pre-expanded (at local sites?) so that
efficiencies due to incremental destructive expansion can be
eliminated. 

5. Numeric code should have all types announced to the translators by the
sources so that optimizations can be made without deductions.
All other such information must be provided.

6. The size of such programs can be arbitrary, though translating
MACSYMA may take a while to do. 

7. All tools developed to aid translation should be forwarded to
RPG so that they may be evaluated and distributed if appropriate.

8. Programs that are useful to a source but impossible (in a
practical sense) to translate should merit special attention to 
decide if there is a useful feature involved.

9. (from GLR)
Timing various programs is a good idea, but the data will
be a little hard to extrapolate.  Is anyone going to measure
parameters such as CONS rate, time to invoke a procedure,
and add times? [Not only that, but number CONSing and its
effect on numeric calculations should be measured as well. Here
RPG will appoint some experts (like JONL) to make up some
good numeric testing code to isolate implementational problems
with specific aspects of Lisp].

10. People should supply some estimate of the runtime and the results
of their benchmarks. Such things as 2 minutes of CPU on a KL running
TOPS-10 is OK, but for unfamiliar machines/Lisps this may not be good enough.
Try to aim at some estimate in terms of the number of CONSes or function
call depth.

11. Every candidate system should have a detailed description of that
system (CPU architecture, memory size, address size, paging algorithm...)

∂04-Mar-81  0449	Robert H. Berman <RHB at MIT-MC> 	Lisp Timing Mailing List  
Date: 4 March 1981 07:48-EST
From: Robert H. Berman <RHB at MIT-MC>
Subject:  Lisp Timing Mailing List
To: RPG at SU-AI
cc: " @LSPTIM.DIS[P,DOC]" at SU-AI


May I suggest the following as a benchmark for numerically orientated
problems: the time it takes to do a fast fourier transform of, say length
1024, of real or complex data.


I have been collecting timing statistics for this benchmark over a
period of 6 years, on a wide range of machines (nearly 50) and compilers,
assemblers etc. Thus, this benchmark would be very helpful
in relating Lisp machine performance to many other architectures.

I have a class of problems that I run that use transform methods
for solving partial differential equations and performing
convolutions and smoothing. Hence my interest in ffts.

Several points to keep in mind about this benchmark:

1. On LM's there is a difference between small flonums and flonums.
Suppose it were done with macsyma's bigfloat package to allow
for  extended precision.

2. Fast Fermat (Integer) Transforms are also helpful here. Integers
in the range 0 to 2↑20, say, can be as useful as small
flonums, but they use only integer arithmetic.

3. Power of 2 transforms, and their families, radix 2, radix 4+2,
radix 8+4+2, etc, can do some of their work by shifting, rather than
dividing. But other bases, i.e. 96 instead of 64 or 128, can be more
efficient than doubling the transform length.

4. The internal data representation can make a difference. Local
variables on the stack of a subroutine are faster to reference than
arrays. I understand there is an architectural limit of 64 stack
variables on LM's. Would it ever be possible to change it? In a 4+2
algorithm, the fastest transform using stack variables only could then
be a 256 length transform, and then there would be a degradation for
longer transforms that used array references.

5. I don't have a completely good feeling yet for all of the
subtleties and speedups available for microcoding a problem
vs writing in lisp, interpreting, compiling, or compiling
into microcode. When a segment of code is going to be used over and
over again, and the code isn't going to change, shouldn't it be
in microcode?

6. I can make several fft packages available in lisp now. One is a
naive radix 2 butterfly algorithm, designed to be short to write and
implement in a hurry. The second is a radix 4+2 and radix 96 family
of transforms that were written for a vector architecture like the Cray,
but translated nearly literally into lisp. Because the Cray encourages
temporary vectors, this radix 4+2 algorithm uses a lot of storage,
rather than transforms in place. I have not yet looked into the issues
I raised in 4.or 5., but these need attention as well.

--  Bob Berman  (rhb@mc)

∂04-Mar-81  0957	Scott.Fahlman at CMU-10A 	Re: Translators    
Date:  4 March 1981 1212-EST (Wednesday)
From: Scott.Fahlman at CMU-10A
To: Dick Gabriel <RPG at SU-AI> 
Subject:  Re: Translators
CC: guy.steele at CMU-10A
In-Reply-To:  Dick Gabriel's message of 3 Mar 81 19:22-EST
Message-Id: <04Mar81 121256 SF50@CMU-10A>


Dick,
I notice in an earlier message that it was contemplated that a full set of
timings be done on CMU's modified TOPS-10 system running MACLISP.  As a 
point of information, all serious Maclisp work here has been moved to the
2060, now that we have one.  I think that running benchmarks for an obsolete
and obviously brain-damaged system which nobody should ever again be forced to
use for anything serious would be a waste of time, and I am not likely to
want to devote any effort to it (although the task would be relatively small
if we get things already translated into legal Maclisp, since the differences
are few).  I could devote some small amount of effort to benchmarking TOPS-20
maclisp, though there are other sites that have this as well and I would prefer
that they carry a good deal of the load on this.

We are willing, even eager, to get timings for Spice Lisp on the extended PERQ
(once we get an extended PERQ), but this effort will lag the others by 6 months
or so while we get our act together.  I would prefer to save our translation
and measurement cycles for that task, since lots of places can check out a
Maclisp.

All of this looks fairly interesting.  It may generate more heat than light,
but at least there will be some data to base the flames on, and the translation
aids should be a very useful side effect.
-- Scott

∂04-Mar-81  0959	CSVAX.char at Berkeley 	lisp benchmarking    
Date: 4 Mar 1981 09:00:47-PST
From: CSVAX.char at Berkeley
To: rpg@sail
Subject: lisp benchmarking
Cc: anlams!boyle@Berkeley, CSVAX.char@Berkeley, CSVAX.fateman@Berkeley

Richard Fateman has informed me of the effort you're organizing to
compare Lisp systems.  James Boyle (csvax.anlams!boyle@BERKELEY) and I
(csvax.anlams!char@BERKELEY) would like to be put on your mailing list
for lisp benchmarking.  We have a program, part of a program
transformation system, which you may be interested in including in the
benchmarking.  It currently runs on Franz, and on the IBM370 Lisp
available at Argonne.  We could create a special version of the code
that predefines variables instead of reading their values off of files;
I/O was the only real problem I had in converting the program to Franz
this past fall.  It is an interesting program in that it is a "real"
application of Lisp -- people have used the transformation system for
development of math software here at Argonne, as preprocessor to a
theorem prover, etc.  It is not so interesting from the viewpoint of
exercising a lot of different Lisp features --  mainly list access and
creation, and CONDing.  Jim Boyle estimates that an interesting
benchmark run would take 30-60 min. of Vax cpu time running under Franz
(interpreted).  This might be too long for benchmarking, if testing
resources are expensive.

∂04-Mar-81  1627	HEDRICK at RUTGERS 	something of possible interest 
Date:  4 Mar 1981 1919-EST
From: HEDRICK at RUTGERS
Subject: something of possible interest
To: rpg at SU-AI

I am not entirely clear what is going on with your lisp timings
mailing list.  However you may possibly be interested in
looking at the file [rutgers]<hedrick>newlisp.  You can FTP it
without logging in I think.  If you have to log on over FTP,
specify user name ANONYMOUS and any password.  This describes
the various tests I have done during design of ELISP, the new
extended addressing version of UCI Lisp for Tops-20.  I think
ELISP will not have much in the way of innovations.  It is
intended to be quite "classical".  I.e. something that we know
how to do, and know that the results of will be useful for us.
It is Lisp 1.6/UCI Lisp constructed with Lisp machine technology
(to the extent we can do it on the 20, no CDR-coding, since that
requires micro code changes.  But we do use a copying GC and
everything is done with typed pointers.)  I expect the performance to be
similar to that of UCI Lisp, as the basic structures will be the same.
It will differ mostly because of completely different data
representations and GC methods.  And because of extended addressing,
which on the 20 still has performance problems.  NEWLISP refers to these
problems without explaining them.  The main problem is in the design of
the hardware pager. This is the thing that maps virtual to physical
addresses.  It should be associative memory, but is implemented by a
table. The net effect of the table is that the same entry is used for
pages 1000, 3000, 5000, 7000, etc.  In fact, which line in the table is
used is determined by bits 774 of the page number (i.e. pages
1000,1001,1002, and 1003 are all stored in the same line).  There is a
kludge to prevent odd numbered sections from interfering with even
numbered ones (The section number is bits 777000), which is why I listed
pages 1000, 3000,etc., and not 0, 2000, ...  If you happen to be
unlucky, and have code in page 1000, a stack in page 3000, and
data in page 5000, your code can easily run a factor of 20 times 
slower than it would otherwise.  By carefully positioning various
blocks of data most of the problems can be prevented.

Please note that ELISP is intended to be a quick and safe implementation.
That means that I am not trying to get the last few percent of efficiency.
I am doing things in ways that I believe will not require much debugging
time, even at the price of speed.  This is because I am a manager, and
don't have much time to write code or to support it after it is finished.
-------

∂06-Mar-81  1301	HES at MIT-AI (Howard Shrobe) 	Methodology considerations:  
Date:  6 MAR 1981 1556-EST
From: HES at MIT-AI (Howard Shrobe)
Subject: Methodology considerations:  
To: RPG at SU-AI

Re your comment about including GC time. I agree wholeheartedly and have been
having a bit of disagreement with Fateman about same.  In addition I would
suggest trying to get statistics on how time shared machines degrade with load.
A lot of folks are trying to make estimates of personal versus time shared and
such choices can only be made if you know how many people can be serviced by a
VAX (KL-10, 2060, etc.) before performance drops off.  Some discussion of this
issue would be useful to such folks.

howie Shrobe

Subject: Lisp Timings Group
To: rpg at SU-AI
cc: correira at UTEXAS-11

Hi.  I've been involved with the maintenance/extensions of two lisps, UTLISP
(on CDC equipment) and UCILISP (Rutgers Lisp, etc).  One of the things that I
did in our version of UCILISP that was missed by Lefaivre (and, hence, Meehan)
was to speed up the interpreter.  (Lefaivre got a copy of my source shortly
before I made the speed ups.)  It actually wound up being a few percent faster
than MACLISP (both on the same TOPS-10 machine).  (I believe MACLISP source
code is close enough to make the same changes - this was very old/unchanged
code in the interpreter.)

Anyway, I'd like to volunteer running the tests on UCI Lisp on both a 2060
(TOPS-20) and a KI-10 (TOPS-10).  I'm a little hesitant about committing
myself to too much work but it looks like you'll have several people running
UCI Lisp so maybe the work will be spread around.  (I guess this means that
you should add me to your LISPTIMING and LISPTRANSLATORS lists.)

For easily transportable code, I'll run it on UTLISP but for any extensive
changes I'll pass.  The current person who is in charge of that Lisp may send
you a separate note.  I've tried to encourage him to do so.  The UTLISP was
(at one time) judged by the Japanese surveyors to be the fastest interpreted
Lisp.  (That is my recollection of the first survey that we were involved in,
sometime about the mid 70's?.  I'm sure it was solely due to the speed of the
hardware.)  It is not an elegant Lisp and has a lot of faults but is a pretty
fast interpreter.  The compiler is a crock - when it works.  It was someone's
masters thesis in the early 70's.

I strongly suggest that you run each of the various Lisps on different CPUs
whenever possible.  There was a note out last fall that compared Interlisp,
Maclisp, and UCI Lisp.  You may remember that I sent out a note that
complained that the timings for UCI Lisp were obviously on a different CPU
(probably a KI-10 compared to KL-10 or 2060).

I also suggest that while general purpose benchmarks may show a general
tendency, we should strive for timings of specific operations.  Such things as
CONS (including GC time), variable binding, function calling, arithmetic,
property list manipulation, array manipulation, stack manipulation (I guess
that's in function calling/variable binding), tree traversing (CAR/CDR
manipulations), FUNARG hacking, COND evaluations, PROG and looping hacking,
etc.  Personally my programs don't use much arithmetic so I don't think that's
too important but obviously some people do.

It would also be useful if people could supply timings of the machine the LISP
is run on.  Such things as instruction fetch times and memory speed are
obviously important.  This might be useful in comparing two Lisps on different
machines.  (Exactly how does a CYBER-170/750 compare with a DEC-2060?)

I don't think that the programs need to be very big or long-running.  They
just need to run long enough (10 seconds?) to minimize minor timing problems.
The important thing is that the various programs concentrate on one specific
area as much as possible.  Of course, all this needs to be balanced by some
programs that have a general mix of operations.

Another possible test, which is not really a timing test, would be to give all
us hackers some particular programming project which would take on the order
of an hour to do.  We would each do it in our own Lisp and report how long it
took us to do it (clock time) and how much resources we used (CPU time).  It
might be also reasonable to report how we did it (eg, used EMACS or some other
editor to write/fix the code versus edit in Lisp itself, how many functions
(macros?), how much commenting, how transparent/hackish the code is, etc.)  I
don't mean that this should be a programming contest but it might give some
idea what is involved in writing a program in each Lisp.  This involves
composing, executing, debugging, and compiling.  I feel this would be a truer
test of a LISP in a typical research situation if we could (hah!) discount the
various programmers skills/resources.  (This suggestion should really stir
up some flames!!)

	Mabry Tyson
	(tyson@utexas-11)
-------

∂10-Mar-81  0727	correira at UTEXAS-11  	lisp timings    
Date: 10 Mar 1981 at 0916-CST
From: correira at UTEXAS-11 
Subject: lisp timings
To: rpg at su-ai
cc: atp.tyson at utexas-20

If anyone is interested, I would be willing to do the work to run the
timing programs for UTLISP Version 5.0.  This is the latest version of
UTLISP, containing code to drag the dialect into the 20th Century of
LISP interpreters.  It has been my experience in the past that
most people shrug UTLISP off with a "oh, that's the one with the extra
pointer field" comment, but I think it is a pretty good LISP now and should be
included in the timings effort. However, the compiler is still a complete
crock (although I am working on a new one, it won't be ready for at least
6 months), so I will pass on doing compiler timings.  Please add my name to
the LISPTIMING and LISPTRANSLATORS mailing lists.

					Alfred Correira
					UTEXAS-11
-------

∂03-Mar-81  2109	Barrow at SRI-KL (Harry Barrow ) 	Lisp Timings    
Date:  3 Mar 1981 1727-PST
From: Barrow at SRI-KL (Harry Barrow )
Subject: Lisp Timings
To: rpg at SU-AI

	I would certainly like to be on your list of recipients of
LISP timing information.   Please add BARROW@SRI-AI to your list.

Did you know that Forrest Baskett has made some comparative timings
of one particular program (cpu-intensive) on several machines, in
several languages?   In particular, LISP was used on DEC 2060, KL-10,
KA-10, and MIT CADR machines   (CADR came out comparable with a KA-10,
but about 50% better if using compiled microcode).

What machines do you plan to use?   I would be very interested to
see how Dolphins, Dorados, and Lisp machines compare...


				Harry.



-------

Yes, I know of Baskett's study. There is at least one other Lisp
study, by Takeuchi in Japan.

So far we have the following Lisp systems with volunteers to
do the timings etc:

Interlisp on MAX, Dolphin, Dorado
MacLisp on SAIL
InterLisp on SUMEX
UCILISP on Rutgers
SpiceLisp on PERQ
Lisp Machine (Symbolics, CADR)
Maclisp on AI, MC, NIL on VAX, NIL on S1 (if available)
InterLisp on F2
Standard Lisp on TOPS-10, B-1700, LISP370
TLC-lisp and muLisp on z-80
Muddle on DMS
Rutgers Lisp
Lisp Machine
UCILISP and MLISP on TOPS-10, TOPS-20
Jericho InterLisp
some Z80 LISP
Multics Maclisp
Cromemco Lisp on Z80
Franz Lisp on VAX UNIX
∂02-Mar-81  0004	Charles Frankston <CBF at MIT-MC> 	timings   
Date: 2 March 1981 00:55-EST
From: Charles Frankston <CBF at MIT-MC>
Subject: timings
To: CSVAX.fateman at BERKELEY
cc: LISP-FORUM at MIT-MC, masinter at PARC-MAXC, RWS at MIT-XX,
    guttag at MIT-XX

It is rather obvious that the timings you distributed are wall times for
the Lisp Machine, whereas the Vax and MC times count only time spent
directly executing code that is considered part of Macsyma.  Ie. the
Vax and MC times exclude not only garbage collection, but operating system
overhead, disk i/o and/or paging, time to output characters to terminals, etc.

I submit comparing wall times with (what the Multics people call) "virtual
CPU" time, is not a very informative exercise.  I'm not sure if the Lisp
Machine has the facilities to make analogous measurements, but everyone
can measure wall time, and in some ways that's the most useful comparison.
Is anyone willing to try the same benchmarks on the Vax and MC with just
one user on and measuring wall times?

Also, are there yet any Lisp machines with greater than 256K words?  No
one would dream of running Macsyma on a 256K word PDP10 and I presume that
goes the same for a 1 Megabyte Vax.  The Lisp Machine may not have a time
sharing system resident in core, but in terms of amount of memory needed
for operating system overhead, the fanciness of its user interface
probably more than makes up for that.  I'll bet another 128K words of
memory would not be beyond the point of diminishing returns, insofar
as running Macsyma.

Lastly, the choice of examples.  Due to internal Macsyma optimizations,
these examples have a property I don't like in a benchmark.  The timings
for subsequent runs in the same environment differ widely from previous
runs.  It is often useful to be able to factor out setup times from a
benchmark.  These benchmarks would seem to run the danger of being dominated
by setup costs.  (Eg. suppose disk I/O is much more expensive on one system;
that is probably not generally interesting to a Macsyma user, but it could
dominate benchmarks such as these.)

I would be as interested as anyone else in seeing the various lisp systems
benchmarked.  I hope there is a reasonable understanding in the various
Lisp communities of how to do fair and accurate benchmarking, else the results will be
worse than useless, they will be damaging.


∂17-Mar-81  1155	Masinter at PARC-MAXC 	Re: GC 
Date: 17 Mar 1981 11:54 PST
From: Masinter at PARC-MAXC
Subject: Re: GC
In-reply-to: RPG's message of 16 Mar 1981 1234-PST
To: Dick Gabriel <RPG at SU-AI>
cc: LispTiming@su-ai, LispTranslators at SU-AI

Interlisp-D uses a reference-count garbage collection scheme. Thus, "garbage
collection" overhead is distributed to those functions which can modify reference
counts (CONS, RPLACA, etc.) with the following important exceptions:

	no reference counts are maintained for small numbers or literal atoms
	references from the stack are not counted

Reference counts are maintained in a separate table from the data being counted.
The table can be thought of as a hash table. In addition, the "default" entry in
the table is reference count = 1, so that in the "normal" case, there is no table
entry for a particular datum.

"Garbage collection" then consists of (a) sweeping the stack, marking data with a
"referenced from the stack" bit in the reference count table if necessary, (b)
sweeping the reference count table, collecting those data whose reference counts
are 0 and which are not referenced from the stack.

--------------

Because of this scheme, it is very difficult to measure performance of Interlisp-D
independent of garbage collection, because the overhead for garbage collection is
distributed widely (although the timing for the sweep phase can be separated
out).

Secondly, the choice of a reference count scheme over the traditional
chase-and-mark scheme used by most Lisps was conditioned by the belief that
with very large virtual address spaces, it was unreasonable to require touching
all active storage before any garbage could be collected.

This would indicate that any timings should take into consideration paging
performance as well as garbage collection overhead, if they are to accurately
consider the overall performance picture.

Larry

∂16-Mar-81  1429	HEDRICK at RUTGERS 	Re: Solicitation    
Date: 16 Mar 1981 1725-EST
From: HEDRICK at RUTGERS
Subject: Re: Solicitation  
To: RPG at SU-AI
cc: lispsources at SU-AI
In-Reply-To: Your message of 16-Mar-81 1526-EST

ELISP: extended R/UCI lisp.  This will be a reimplementation of
Rutgers/UCI lisp for Tops-20 using extended (30-bit) addressing. It is
implemented using typed pointers and a copying GC, but will otherwise be
almost exactly the same as R/UCI lisp (unless you are accustomed to
CDR'ing into the innards of strings, etc.).
  hardware - Model B KL processor or Jupiter.  I am not clear whether
	a 2020 has extended addressing.  If so that would also be
	usable.
  OS - Tops-20, release 5 or later (release 4 useable with minimal
	patching)
  binding type- shallow dynamic, with same stack mechanisms as
	UCI Lisp
  compiler - Utah standard lisp transported to our environment

At the moment performance appears to be the same as R/UCI Lisp, except
that the GC takes about twice as long for a given number of CONS cells
in use.  The time per CONS may be less for substantial programs, since
we can afford to run with lots of free space, whereas our big programs
are pushing address space, and may not be able to have much free space,
hence GC a lot.

At the moment I have an interpreter that does a substantial part of Lisp
1.6.  I hope to finish Lisp 1.6 by the beginning of the summer.  I also
hope to have a compiler by then.  I am doing the interpreter personally,
and one of my staff is doing the compiler.  I am implementing R/UCI
lisp roughly in historical order, i.e. Lisp 1.6 first, then UCI lisp,
then Rutgers changes, though a few later features are slipping in (and
I am not doing anything I will have to undo).

Note that I have little if any interest in performance.  I want to match
R/UCI lisp, since users may complain if things suddenly slow down, but
that is about it.  I am more concerned about reliability (since I will
have little time to maintain it) and how long it takes to write it
(since I have little time to write it).  Our users are doing completely
traditional Lisp work, and have little or no interest in more flexible
binding or control semantics (we supplied a version of R/UCI lisp with
Scheme semantics, and no one was interested), nor in speed in
arithmetic.  The system is designed to be modular enough that
improvements can be done as needed.  I am giving some thought to
transportability, though not as much as the Utah folks. I think we
should be able to transport it to a system with at least 16 AC's and a
reasonable instruction set (e.g. VAX) with 2 man-months or less.

As far as the hardware we have available for testing, we will shortly
have 1M of MOS memory, 4 RP06's on 2 channel, and a model B KL processor
(the model matters since the model B is faster than the model A.  Note
that the processor model number is almost the only variable you care
about in a 20, but it is not derivable from the DEC marketing
designation, since a 2050 or 2040 may be either model.  However a 2060
is always model B).
-------

∂16-Mar-81  1433	HEDRICK at RUTGERS 	Re: GC    
Date: 16 Mar 1981 1728-EST
From: HEDRICK at RUTGERS
Subject: Re: GC  
To: RPG at SU-AI
cc: lisptranslators at SU-AI
In-Reply-To: Your message of 16-Mar-81 1534-EST

; the garbage collector.  its init routine is called gcinit and
; takes these args:
;   - the beginning of constant data space, which is really at the
;	start of the first of the two data spaces
;   - the first word beyond the constant data space, which is the
;	beginning of the usable part of the first data space
;   - the start of the second data space
;   - the first word beyond the second data space
	; garbage collector variables:
	;free - last used location in data space
	;lastl - last legal location in this data space - 1.  Trigger a GC if
	;   someone tries to go beyond this.  
	;stthis - start of this data space
	;enthis - end of this data space
	;stthat - start of other data space
	;enthat - end of other data space
	;stcnst - start of constant space
	;encnst - end of constant space

	.scalar lastl,stthis,enthis,stthat,enthat,stcnst,encnst

freesz==200000	;amount of free space at end of GC

   <<<initialization code omitted>>>


;This is a copying GC, modelled after the Lisp Machine GC, as
;described in Henry Baker's thesis.  There are two data spaces, old and new.
;A GC copies everything that is in use from old to new, and makes new the
;current one.  The main operation is translating objects.  If the object
;is absolute, e.g. an INUM, this is a no-op.  Only pointers into the old
;space are translated.  They are translated by finding the equivalent object
;in the new space, and using its pointer.  There are two cases:
;  - we have already moved the object.  In this case the first entry of
;	the old space copy is a pointer to the copy in new space.  These
;	pointers have the sign bit on, for easy detection.
;  - we have not moved the object.  In this case, we copy it to the end of
;	new space, and use the pointer to the beginning of this copy.
;At any given time, we have a pointer into new space.  Everything before
;this pointer has been translated.   Everything after it has not.  We also
;have to translate the stack and the constant area.  Indeed it is translating
;these areas that first puts something into new space to translate.

mark==400000,,0		;bit that says this has already been translated

;Because there are four different areas to translate, we have a separate
;routine to do the translation.
;  gctran:
;	w3 - first address to be translated.  W2 is updated, and is the
;		pointer mentioned above.  I.e. everything before W2 has
;		been translated
;	w4 - last address to be translated.

;The code within gctran avoids the use of the stacks, in order to avoid
;performance problems because of addressing conflicts between the stack
;and the areas being GC'ed.

gctran:	move o1,(w3)		;o1 - thing to be translated
	gettyp o1		;see what we have
	xct trntab(w2)		;translate depending upon type
	camge w3,w4		;see if done
	aoja w3,gctran		;no - next
	ret

;GCTRAX - special version of the above for doing new space.  Ends when
;we reach the free pointer
gctrax:	move o1,(w3)		;o1 - thing to be translated
	gettyp o1		;see what we have
	xct trntab(w2)		;translate depending upon type
	camge w3,free		;see if done
	aoja w3,gctrax		;no - next
	ret

;;TYPES
trntab:	jsp w2,cpyatm		; atom
	jfcl			;  constant atom
	jsp w2,cpycon		; cons
	jfcl			;  constant cons
	jsp w2,cpystr		; string
	jfcl			;  constant string
	jsp w2,cpychn		; channel
	jfcl			;  constant channel
	jfcl			; integer
	jsp w2,cpyrea		; real
	jrst 4,.		; hunk
	jfcl			; address
	jsp w2,cpyspc		; special

;here to translate a CONS cell - normally we copy it and use addr of new copy
cpycon:	skipge o2,(o1)		;do we already have a translation in old copy?
	jrst havcon		;yes - use it
	dmove o2,(o1)		;copy it
	dmovem o2,1(free)
	xmovei o2,1(free)	;make address into CONS pointer
	tlo o2,(object(ty%con,0))
	movem o2,(w3)		;put it in place to be translated
	tlc o2,(mark\object(ty%con,0)) ;make a pointer to put into old copy
	movem o2,(o1)		;and put it there
	addi free,2		;advance free list
	jrst (w2)

havcon:	tlc o2,(mark\object(ty%con,0)) ;turn into a real cons pointer
	movem o2,(w3)		;put in place to be translated
	jrst (w2)

  <<<the rest of the types are like unto this>>>
-------

∂16-Mar-81  1810	Scott.Fahlman at CMU-10A 	Re: GC   
Date: 16 March 1981 2109-EST (Monday)
From: Scott.Fahlman at CMU-10A
To: Dick Gabriel <RPG at SU-AI> 
Subject:  Re: GC
In-Reply-To:  Dick Gabriel's message of 16 Mar 81 15:34-EST
Message-Id: <16Mar81 210911 SF50@CMU-10A>


Dick,
I believe we gave you a copy of the SPice Lisp internals document?  If so,
our GC algorithm is described there.  We can run with GC turned off, though
we pay some overhead anyway.  If incremental GC is turned on, the cost is
so spread out that it would be impossible to separate.  Perhaps the only fair
thing to do, if the thing of interest ultimately is large AI jobs, is to run
big things only or small things enough times that a few GC will have happened.
Then you can just measure total runtime.
-- Scott

∂16-Mar-81  1934	PLATTS at WHARTON-10 ( Steve Platt) 	lisp -- my GC and machine specs  
Date: 16 Mar 1981 (Monday) 2232-EDT
From: PLATTS at WHARTON-10 ( Steve Platt)
Subject: lisp -- my GC and machine specs
To:   rpg at SU-AI

  Dick, just a reminder about this all...  it is all describing a
lisp for Z80 I'd like to benchmark out of curiosity's sake.
  1) All times will have to be done via stopwatch.  I might write a
quick (DO <n> <expr>) to repeat evaluation oh, say, 100 times or so
for better watch resolution.  GC time will *have* to be included
as I don't separate it out.
  2) I plan to be speaking to John Allen about his TLC lisp -- as there's
probably much similarity, I'd like to benchmark his at the same time.
I'll be sending him a copy of this letter.
 
  3) GC is a simple mark'n'sweep.  At some future time, I might replace
this with a compressing algorithm, makes core-image saving simpler.
I GC cons cells and atom space, but not number or string space (number
space for bignums (>1000 hex or so, use pointers for small integers),
string space for pnames.)  Proper strings might be implemented in the
future sometime.
  4) Lisp is an unreleased CDL lisp, still under development.  It works
under CPM 1.4 or anything compatible with that, on a Z80.  CDL Lisp has
its roots in Maclisp, I guess you'd say.  Binding is deep.  Compiler?
Hah -- maybe after my dissertation is finished...  Macros -- the same.
I don't really view macros as essential, so they have a relatively low
priority... both have been thought about, but won't be benchmarkable.
  5) The hardware environment is relatively constrained.  48K physically
right now, may be up to 60K by benchmark time... (this figures into
roughly 8K free cells, the additional 12K will add 3K cells...)
No cache, only 2 8" floppies.  A typical "good" home system.
 
  After reading this all, it's probably relatively depressing when
compared to some of the major machines being benchmarked.  But it is
representative of the home computing environment...

  If you have any more specific questions, feel free to ask.

   -Steve Platt (Platts @ Wharton)

∂17-Mar-81  0745	Griss at UTAH-20 (Martin.Griss) 	Re: GC      
Date: 17 Mar 1981 0835-MST
From: Griss at UTAH-20 (Martin.Griss)
Subject: Re: GC  
To: RPG at SU-AI
cc: Griss at UTAH-20
In-Reply-To: Your message of 16-Mar-81 1334-MST

Standard LISP runs on a variety of machines, with existing LISPs, each with
a different GC; we will choose a machine set, and briefly describe;

What is standard AA analysis???
M
-------

∂17-Mar-81  0837	Robert S. Boyer <BOYER at SRI-CSL> 	Solicitation  
Date: 17 March 1981  08:34-PST (Tuesday)
From: Robert S. Boyer <BOYER at SRI-CSL>
To:   Dick Gabriel <RPG at SU-AI>
Cc:   Boyer at SRI-CSL
Subject: Solicitation  

The machine on which I can run LISP timings is a Foonly F2,
which emulates a DEC KA processor and a BBN pager, and runs
a variant of Tenex called Foonex.  It has 1/2 million words
of 500 nanosecond memory, no cache, no drum, and a CDC
Winchester disk.

I have used Interlisp extensively, but I haven't studied the
compiler output or MACRO sources enough to claim expertese
at optimal coding.

I am marginally familiar with Maclisp now and I plan to
become more familiar soon.

For the purpose of getting a complete set of F2 vs. 2060
timings, I'd be willing to run tests of other PDP-10 LISPs
that are Tenex compatible, provided the tests can be
performed without too much understanding of the LISP
variants.

I have a benchmark that J Moore and I constructed a few
months ago to compare Interlisp and Maclisp.  The files on
ARPANET host CSL named <BOYER>IREWRITE and <BOYER>MREWRITE
contain, respectively, Interlisp and Maclisp code for a far
from optimal rewrite style theorem prover.  (To FTP log in
as Anonymous, password foo.)  MREWRITE is coded so that,
except for the statistics gathering, it is also in Franz
LISP.  To start, you invoke (SETUP).  Then run (TEST), as
many times as you want.  TEST returns some statistics -- but
I assume that RPG will want to standardize here.  (TEST)
turns over storage very rapidly, recurses a lot, does very
little arithmetic, and engages in no fancy structuring (e.g.
RPLACs).  Our intention in coding TEST was to produce
quickly a small facsimile of the heart of our rather large
theorem-proving system in order to compare LISP times.

By intentionally coding a program that would be easy to
translate from Interlisp to Maclisp, we did injustice to
both LISPs.  For example, we used recursion where we might
have used the I.S.OPR construct in Interlisp or the DO
construct in Maclisp -- or a MAP construct in either.

∂17-Mar-81  0847	Robert S. Boyer <BOYER at SRI-CSL> 	LISP Timings  
Date: 17 March 1981  08:43-PST (Tuesday)
From: Robert S. Boyer <BOYER at SRI-CSL>
To:   RPG at SU-AI
Subject:  LISP Timings
cc:   Boyer at SRI-CSL

Could we include a cost column in the final grand tally?  It
has been remarked that many people are trying to decide
which LISP system to use, now and in the future.  Cost will
be an important criterion.  Maintenance charges should be
included since over the life of a machine, they may approach
the purchase price.  It should be relatively easy for each
person who volunteers a machine to indicate the purchase
price and maintenance charges.

∂17-Mar-81  1155	Masinter at PARC-MAXC 	Re: GC 
Date: 17 Mar 1981 11:54 PST
From: Masinter at PARC-MAXC
Subject: Re: GC
In-reply-to: RPG's message of 16 Mar 1981 1234-PST
To: Dick Gabriel <RPG at SU-AI>
cc: LispTiming@su-ai, LispTranslators at SU-AI

Interlisp-D uses a reference-count garbage collection scheme. Thus, "garbage
collection" overhead is distributed to those functions which can modify reference
counts (CONS, RPLACA, etc.) with the following important exceptions:

	no reference counts are maintained for small numbers or literal atoms
	references from the stack are not counted

Reference counts are maintained in a separate table from the data being counted.
The table can be thought of as a hash table. In addition, the "default" entry in
the table is reference count = 1, so that in the "normal" case, there is no table
entry for a particular datum.

"Garbage collection" then consists of (a) sweeping the stack, marking data with a
"referenced from the stack" bit in the reference count table if necessary, (b)
sweeping the reference count table, collecting those data whose reference counts
are 0 and which are not referenced from the stack.

--------------

Because of this scheme, it is very difficult to measure performance of Interlisp-D
independent of garbage collection, because the overhead for garbage collection is
distributed widely (although the timing for the sweep phase can be separated
out).

Secondly, the choice of a reference count scheme over the traditional
chase-and-mark scheme used by most Lisps was conditioned by the belief that
with very large virtual address spaces, it was unreasonable to require touching
all active storage before any garbage could be collected.

This would indicate that any timings should take into consideration paging
performance as well as garbage collection overhead, if they are to accurately
consider the overall performance picture.

Larry

p
∂17-Mar-81  1218	RPG  	Bureaucracy   
To:   lisptiming at SU-AI   
In sending messages around, the following facts are useful:
	RPG is on LISPSOURCES which is equal to
LISPTRANSLATORS, which is a subset of LISPTIMING.

So there is no need to send me a copy of everything, nor
is it necessary to have LISPTIMING and LISPSOURCES on the same
header, for example. Thanks.
			-rpg-

∂17-Mar-81  1921	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: Solicitation    
Date:     17 March 1981 2142-est
From:     Bernard S. Greenberg       <Greenberg at MIT-Multics>
Subject:  Re: Solicitation
To:       lispsources at SU-AI
Cc:       Multics-Lisp-people at MIT-MC

Well, Multics MacLisp, letsee:

Multics Maclisp, consisting of an interpreter, compiler, LAP (not used
by the compiler, tho), runtime, and utilities, was developed by
MIT Lab for Computer Science (LCS) in 1973 with the aim of exporting
the Macsyma math system to Multics (of which MIT-Multics was the only
one at the time).  Dave Reed (now at LCS) and Dave Moon (now at MIT-AI
and Symbolics, Inc.) were the principal implementors then, and
Alex Sunguroff (don't know where he is now) to a lesser degree.
Reed and Moon maintained it to 1976, I maintained it until now.
Its maintenance/support status since my flushance of Honeywell
(December 1980) is now up in the air, although Peter Krupp
at Honeywell is now nominally maintainer.

The interpreter and general scheme of things were developed partly
on the experience of PDP-10 Maclisp, vis-à-vis running out of space,
and an earlier Multics Lisp by Reed, vis-à-vis better ways to do this
on Multics.   Multics MacLisp features virtually infinite address
space (limited by the size of a Multics Process directory, which
is virtually unlimited), a relocating/copying garbage collector,
strings, bignums and other MacLisp features, general compatibility
with (ITS) MacLisp, and very significantly, the facility to interface
to procedures in other languages (including Multics System routines)
on Multics.

With the notable exception of the compiler, which is a large (and
understandable, as well as effective) Lisp program of two large
source files, the system is in PL/I and Multics assembler: the
assembler portions, including notably the evaluator, are that
way for speed.  The language was designed to be as close to
ITS Maclisp as possible at the time (1973), but has diverged some.
The compiler was developed as two modules, a semantics pass
reworked from the then-current version of the fearsome ITS
COMPLR/NCOMPLR (1973), and the code generator was written anew
by Reed (1973), although it uses NCOMPLR-like strategies
(I have a paper on this subject).

Although used in the support of Macsyma, the largest and most important
use of Multics Maclisp is as the implementation and extension language
of the Multics Emacs "text processing and video process management"
system.  Other large subsystems in Multics Maclisp over the years
have included a Multics crash and problem analysis subsystem and
a management-data modeling system (KOMS, about which I know little).

Pointers in Multics Maclisp are 72-bit, which includes a 9-bit
type field.  Non-bignum numbers (fixna and flona) are directly
encoded in the pointer, and do not require allocation, or the
hirsute "PDLNMK" scheme of ITS MacLisp. Symbols and strings are
allocated contiguously, and relocated at garbage-collect time.
Binding is the standard MacLisp shallow-binding (old values
saved on PDL, symbol contains "current" value).  Other Maclisp
language accoutrements (property lists, functional properties,
MacLisp macros, etc.) exist.

"A description of my OS:"

Well, the Multics Operating System enjoys/suffers a paged,
segmented virtual memory, implementing virtual storage and virtual
file access in a unified fashion. The paradigm is so well-known
that I cannot bear to belabor it any more.  The net effect
on Lisp is a huge address space, and heavy interaction
between the GC algorithm and performance.  Multics will run
in any size memory between 256K words and 16 million (36 bit
words) The Multics at MIT (there are about three dozen multices
all over the world now) has 3 million words of memory,
which I believe is 1 microsecond MOS. The MIT configuration runs
3 cpus - other sites vary between 1 and 5.  The cache per
CPU is 2k words, and is "very fast", but the system gets CPU limited,
and can rarely exceed 1 MIP per cpu (highly asynchronous processor),
although powerful character and bit string handling instructions
can do a lot faster work than a 1 mip load/store chain.  You
wanted to know about disks:

     Date:  16 March 1981 22:54 est
     From:  Sibert (W. Olin Sibert)

     An MSU0451 has 814 cylinders, of 47 records each. Its average seek time
     is 25 ms. (I don't know whether that's track-to-track, 10 percent, or
     half platter -- I'll bet it's track-to-track, though). Its average
     rotational latency is 8.33 ms. Its transfer rate is about 690K 8bit
     bytes (614K 9bit bytes) per second, or 6.7 ms. per Multics record.
     [1024 words]

I cannot really think of benchmark possibilities that would
show the performance of Multics MacLisp to great advantage.
For all its virtual memory, the antiquated basic architecture
of the Honeywell 6000 series (from the GE600) provides a
hostile environment to the Lisp implementor.  Only one register
(AQ) capable of holding a full Lisp pointer exists, and this
same register is the only one you can calculate in, either.
Thus, the compiler can't do useful register optimization
or store-avoidance, and comes nowhere near NCOMPLR, which
is using the same techniques to implement the same language,
in the performance of its object code.
MacLisp type and array declarations are supported, and utilized
in the straightforward way by the compiler to improve generated code,
but in no way could it be claimed that what it generates is
competitive.

Multics MacLisp is "owned by MIT. It is distributed by MIT to anyone
who wants.  It is part of some Honeywell products [Emacs], and is
supported by Honeywell to the extent and only to the extent necessary
to keep these products operative. Honeywell can distribute it,
but may not charge for it, but may charge for products written in it".
Although its support is a current hot potato, interest in using
Multics Maclisp is continually growing, and interesting subsystems
in it are being developed as of this writing.

Anything else?

∂31-Mar-81  1451	RPG  	Timing Benchmarks  
To:   lisptiming at SU-AI   
Since I haven't gotten much in the way of volunteered benchmarks
yet, I propose to begin to write some myself and with the help of
some of you. Here's the initial list of things I want to test the
speed of:

	Array reference and storage (random access)
	Array reference and storage (matrix access)
	Array reference and storage (matrix inversion)
	Short list structure (records, hunks...)
	Long list structure (cdr access)
	CAR heavy structures
	CDR heavy structures
	Interpreted function calls
	Compiled function calls
	Smashed function calls
	Table function calls (FUNCALL, SUBRCALL)
	Tail recursion (?)
	Block compiling
	Reader speed
	Property list structures
	Atom structures (saturated obarrays)
	Internal loops
	Trigonometric functions
	Arithmetic (floating and fixed)
	Special variable lookup
	Local variable lookup
	CONS time
	GC time
	Compiled code load time
	EQ test time
	Arithmetic predicates
	Type determination

Suggestions solicited.
			-rpg-

∂01-Apr-81  1550	Masinter at PARC-MAXC    
Date: 1 Apr 1981 15:49 PST
From: Masinter at PARC-MAXC
To: LispTiming at SU-AI

These are numbers that I generated in late 1977, measuring instruction 
counts for various Interlisp-10 operations. One thing to be careful of in
measuring Interlisp-10 is to watch whether the functions are swapped
or not.... it makes a big difference. I suggest Interlisp-10 timings should
be made (at least once) with NOSWAPFLG set to T before the timed
program is loaded in.

------ begin forwarded message -------

I have just made some measurments of how many instructions it takes 
to do various things in LISP, and I thought they might be of general 
interest.

All measurements are in number of PDP-10 instructions, taking
no account of the relative speed of those instructions.
Measurements for Maxc (which has some special PDP-10 mode
instructions to improve function call performance) are given in
parentheses.

To call a non-swapped compiled function (not in a block) which has
all of its args LOCALVARS takes 50 instructions. (28 on Maxc)
{note that in this and subsequent figures, the time "to call" something
also includes the time to return from it}

To call a SUBR of 1 argument takes 56 instructions. (30 on maxc)
To call a function in the same block where the called function
has all of its args LOCALVARS takes 4 instructions + 1 for each
formal argument.

If the called function has any of its arguments SPECVARS then
it takes 57 instructions plus 12 for each SPECVAR arg and 2 for
each non-specvar arg. To bind variables with a PROG is roughly
the same. (this is 25+9/specvar on Maxc)

Block entry takes 69 instructions, i.e. (BLOCK (FOO FOO)) then
to call FOO will take 19 more instructions than (BLOCKS (NIL FOO
(LOCALVARS . T)))
(this is 45 on Maxc, i.e. about 17 more for block entry).
{you want to do the former if FOO calls itself recursively, though}.

To do a BLKAPPLY* takes 80 instructions + 3 per entry on blkapplyfns
which must be skipped (i.e. if you BLKAPPLY 'FOO and FOO is the
third entry on BLKAPLYFNS then this is 6 extra instructions).
(same on Maxc)

To call a SWAPPED function takes at least 86 additional instructions
per call. This is independent of whether the called function is
a block or a simple function, etc.

A LINKED function call takes 10 more instructions than a non-linked
function call. You should therefore always put (NOLINKFNS . T) 
in your blocks declaration unless you have a specific
reason for wanting the calls linked.

∂05-Apr-81  2141	JHL   via LONDON    
To:   lisptiming at SU-AI   
how about including:
environmet switching (stack-groups, eval with alist (ptr),
	closure, etc)
variable lookup  within switched environment
primitives on strings, bits, bytes seem to be missing

∂05-Apr-81  2217	Carl Hewitt <CARL at MIT-AI> 	Lisp Timing Mailing List 
Date: 6 April 1981 01:08-EST
From: Carl Hewitt <CARL at MIT-AI>
Subject:  Lisp Timing Mailing List
To: RPG at SU-AI
cc: HEWITT at MIT-AI, " @LSPTIM.DIS[P,DOC]" at SU-AI

Dick,

Please change my name on the mailing list to CARL-JUNK
so that receiving mail doesn't interrupt me on line.

Thanks,

Carl

∂06-Apr-81  1302	RPG  	Timing benchmark   
To:   "@LSPTRN.DIS[P,DOC]" at SU-AI   
The following is the first of the timing benchmarks to be tried.  As such
it is fairly simple. It is simply a combinatorial pairing function that
takes 2 sets (represented as lists), a matching function, and some other
constraints, and produces a set of possible pairings of items from each
set. The example below produces 2592 possible pairings. I've included the
entire testing program, which is at the bottom, along with the test data,
which are stored in global variables. Below is reproduced the timings from
the SAIL KL running with a load average of .75. The output of the test
program is the number of pairings, the runtime (EBOX time on the KL), and
the gctime (EBOX time).  The first run involves COREing up in response to
list space exhaustion, which results in the large gctime for the first
run: the other runs are in the resulting core images. I suggest you also
run it a few times to get a stable set of readings.

It would be nice to get some results from you for the LISP meeting at SRI
on wednesday, but that may not be possible.
			-rpg-
2592 
(RUNTIME 1.948) 	;in seconds
(GCTIME 29.711) 

2592 
(RUNTIME 1.887) 
(GCTIME 2.599) 

2592 
(RUNTIME 1.886) 
(GCTIME 2.565) 

2592 
(RUNTIME 1.892) 
(GCTIME 1.922) 

2592 
(RUNTIME 1.895) 
(GCTIME 1.973) 

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;; PAIRS -- top-level entry of the SCCPP benchmark ("SAIL constraint
;;; combinatorial pairing program").  Given two sets X and Y (represented
;;; as lists), a matching predicate FUN, and constraint arguments, it
;;; returns the list of possible pairings of items from X with items
;;; from Y (2592 pairings for the distributed test data).  Enumeration
;;; direction depends on the relative set sizes: when X is the smaller
;;; side, candidates come from MAKE-POSSIBILITY-1/PAIRS1, otherwise from
;;; MAKE-POSSIBILITY-2/PAIRS2 (which swap the argument order given to FUN).
;;; NOTE(review): with 7 arguments this is an implicit LEXPR in MacLisp
;;; (more than 5 args are passed on the stack); that calling convention
;;; is deliberately part of what the benchmark times.
(DEFUN PAIRS (X Y MUST-APPEAR FUN APPLY-CONSTRAINTS CONSTRAINTS
	      NIL-PAIRS) 
       ;; XXX is bound to the pairings produced by the MAPCAR form below.
       ((LAMBDA (XXX) 
	 (MAPCAN 
	  (FUNCTION(LAMBDA (I) 
	      (PROGN
	       ;; When MUST-APPEAR is non-NIL, scan pairing I for an entry
	       ;; whose CDR is in MUST-APPEAR, escaping early via *THROW.
	       ;; NOTE(review): the value of this COND is discarded by the
	       ;; enclosing PROGN, so MUST-APPEAR never actually filters
	       ;; anything -- (LIST I) is returned for every I.  The test
	       ;; data passes MUST-APPEAR = (), so this path is unused.
	       (COND
		(MUST-APPEAR
		 (*CATCH
		  'OUT
		  (PROGN
		   (MAPC 
		    (FUNCTION(LAMBDA (I) (COND ((MEMBER (CDR I) MUST-APPEAR)
					 (*THROW 'OUT T)))))
		    I)
		   NIL)))
		(T)))
	      (LIST I)))
	  XXX)) 
	;; Strip the leading AVOID component from each enumerated pairing,
	;; choosing the enumeration direction by comparing the set lengths
	;; (NIL-PAIRS adds one extra "unpaired" slot to Y's side of the
	;; comparison).
	(MAPCAR (FUNCTION(LAMBDA (I) (CDR I)))
		(COND ((< (LENGTH X)
			  (+ (COND (NIL-PAIRS 1) (T 0)) (LENGTH Y)))
		       (PAIRS1 (MAKE-POSSIBILITY-1 X
						   Y
						   FUN
						   APPLY-CONSTRAINTS
						   CONSTRAINTS
						   NIL-PAIRS)))
		      (T (PAIRS2 (MAKE-POSSIBILITY-2 Y
						     X
						     FUN
						     APPLY-CONSTRAINTS
						     CONSTRAINTS
						     NIL-PAIRS)))))))


;;; MAKE-POSSIBILITY-1 -- build candidate lists for PAIRS1.  For each item
;;; I of X, collect (N . J) for every J in Y accepted by (FUNCALL FUN I J)
;;; -- or for every J when FUN is () -- where N is J's 1-based position in
;;; Y.  Each I with at least one candidate contributes (I (N1 . J1) ...)
;;; to the result, provided the APPLY-CONSTRAINTS/CONSTRAINTS check passes.
;;; When NIL-PAIRS is non-NIL, a '(NIL) candidate (an "unpaired" option)
;;; is spliced destructively, via RPLACD, onto the front of each
;;; candidate list.
(DEFUN MAKE-POSSIBILITY-1 (X Y FUN APPLY-CONSTRAINTS CONSTRAINTS
			   NIL-PAIRS) 
       ;; N is the position counter within Y; it is reset to 0 for each I.
       ((LAMBDA (N) 
	 ((LAMBDA (Q) 
	   (COND
	    ;; Optionally add the "unpaired" candidate to every entry of Q.
	    (NIL-PAIRS (MAPC (FUNCTION(LAMBDA (I) (RPLACD I
						   (LIST* '(NIL)
							  (CDR I)))))
			     Q))
	    (Q)))
	  (MAPCAN 
	   (FUNCTION(LAMBDA (I) 
	      (PROGN
	       (SETQ N 0)
	       ;; A is the candidate list for I; keep (I . A) only when A
	       ;; is non-empty and the constraint check (if both supplied)
	       ;; succeeds.
	       ((LAMBDA (A) (AND A
				 (OR (NULL CONSTRAINTS)
				     (NULL APPLY-CONSTRAINTS)
				     (FUNCALL APPLY-CONSTRAINTS
					      CONSTRAINTS))
				 (LIST (LIST* I A))))
		(MAPCAN 
		 (FUNCTION(LAMBDA (J) ((LAMBDA (Q) (COND (Q (NCONS Q))))
				(PROGN (SETQ N (1+ N))
				       (COND ((OR (NULL FUN)
						  (FUNCALL FUN I J))
					      (LIST* N J)))))))
		 Y)))))
	   X)))
	0))


;;; MAKE-POSSIBILITY-2 -- identical to MAKE-POSSIBILITY-1 except that the
;;; matching predicate is applied with its arguments swapped, (FUNCALL FUN
;;; J I), since PAIRS calls this one with Y and X exchanged.  See
;;; MAKE-POSSIBILITY-1 for the structure of the result.
(DEFUN MAKE-POSSIBILITY-2 (X Y FUN APPLY-CONSTRAINTS CONSTRAINTS
			   NIL-PAIRS) 
       ;; N is the position counter within Y; it is reset to 0 for each I.
       ((LAMBDA (N) 
	 ((LAMBDA (Q) 
	   (COND
	    ;; Optionally add the "unpaired" candidate to every entry of Q.
	    (NIL-PAIRS (MAPC (FUNCTION(LAMBDA (I) (RPLACD I
						   (LIST* '(NIL)
							  (CDR I)))))
			     Q))
	    (Q)))
	  (MAPCAN 
	   (FUNCTION(LAMBDA (I) 
	      (PROGN
	       (SETQ N 0)
	       ;; Keep (I . A) only when the candidate list A is non-empty
	       ;; and the constraint check (if both supplied) succeeds.
	       ((LAMBDA (A) (AND A
				 (OR (NULL CONSTRAINTS)
				     (NULL APPLY-CONSTRAINTS)
				     (FUNCALL APPLY-CONSTRAINTS
					      CONSTRAINTS))
				 (LIST (LIST* I A))))
		(MAPCAN 
		 (FUNCTION(LAMBDA (J) ((LAMBDA (Q) (COND (Q (NCONS Q))))
				(PROGN (SETQ N (1+ N))
				       (COND ((OR (NULL FUN)
						  (FUNCALL FUN J I))
					      (LIST* N J)))))))
		 Y)))))
	   X)))
	0))


;;; PAIRS1 -- recursively enumerate pairings from the candidate list L
;;; built by MAKE-POSSIBILITY-1.  Each element of L is (CAND (N1 . J1)
;;; ...).  Partial pairings returned by the recursive call have the form
;;; (AVOID . ANS), where AVOID is the list of Y-indices already used.  A
;;; candidate whose index (CAR I) is already on AVOID yields the pairing
;;; unchanged; otherwise the pairing is extended with (CAND . J) and the
;;; candidate's index is pushed onto AVOID so no Y item is used twice.
;;; Base case: the single empty pairing '((NIL)).
(DEFUN PAIRS1 (L) 
       (COND
	((NULL L) '((NIL)))
	(T
	 ;; CAND = (CAAR L), the X item; POSS = (CDAR L), its candidates.
	 ((LAMBDA (CAND POSS) 
	   (MAPCAN 
	    (FUNCTION(LAMBDA (PAIRS) 
	       (PROGN
		((LAMBDA (AVOID ANS) 
		  (MAPCAN 
		   (FUNCTION(LAMBDA (I) 
			     ((LAMBDA (Q) (COND (Q (NCONS Q))))
			      (PROGN (COND ((CAR (MEMBER (CAR I)
							 AVOID))
					    (LIST* AVOID ANS))
					   (T (LIST* (LIST* (CAR I)
							    AVOID)
						     (LIST* CAND
							    (CDR I))
						     ANS)))))))
		   POSS))
		 (CAR PAIRS)
		 (CDR PAIRS)))))
	    (PAIRS1 (CDR L))))
	  (CAAR L)
	  (CDAR L)))))


;;; PAIRS2 -- mirror image of PAIRS1, used when PAIRS enumerated Y before
;;; X.  Identical enumeration, except each extension conses the pair the
;;; other way around, (J . CAND) via (LIST* (CDR I) CAND), so the output
;;; pairs read in the same X-then-Y order as PAIRS1's.
(DEFUN PAIRS2 (L) 
       (COND
	((NULL L) '((NIL)))
	(T
	 ;; CAND = (CAAR L), the enumerated item; POSS = its candidates.
	 ((LAMBDA (CAND POSS) 
	   (MAPCAN 
	    (FUNCTION(LAMBDA (PAIRS) 
	       (PROGN
		((LAMBDA (AVOID ANS) 
		  (MAPCAN 
		   (FUNCTION(LAMBDA (I) 
			     ((LAMBDA (Q) (COND (Q (NCONS Q))))
			      (PROGN (COND ((CAR (MEMBER (CAR I)
							 AVOID))
					    (LIST* AVOID ANS))
					   (T (LIST* (LIST* (CAR I)
							    AVOID)
						     (LIST* (CDR I)
							    CAND)
						     ANS)))))))
		   POSS))
		 (CAR PAIRS)
		 (CDR PAIRS))))) 
	    (PAIRS2 (CDR L))))
	  (CAAR L)
	  (CDAR L)))))

;;; Test data for SCCPP.  A and B are the two "sets" handed to PAIRS by
;;; TEST below; they are declared SPECIAL so compiled code references them
;;; as global (special) variables.  With this data, (PAIRS A B () 'EQUAL
;;; () () ()) yields 2592 pairings.
(declare (special a b))
;; First set: 12 elements (some duplicated).
(setq a '(
	  (1 2)
	  (7 8)
	  (9 0)
	  (a b c)
	  (a b c)
	  (d e f)
	  (d e f)
	  (g h i)
	  (g h i)
	  (j k l)
	  (m n o)
	  (p q r)
	  ))
;; Second set: 22 elements (duplicates, plus some reversed number pairs).
(setq b '(
	  (a b c)
	  (j k l)
	  (d e f)
	  (p q r)
	  (g h i)
	  (9 0)
	  (a b c)
	  (p q r)
	  (7 8)
	  (j k l)
	  (2 1)
	  (3 2)
	  (8 7)
	  (9 8)
	  (0 9)
	  (m n o)
	  (d e f)
	  (j k l)
	  (m n o)
	  (d e f)
	  (p q r)
	  (g h i)
	  ))

;;; TEST -- run one timed iteration of the benchmark.  Snapshots RUNTIME
;;; and (STATUS GCTIME) before calling PAIRS on the globals A and B, then
;;; prints the number of pairings, the runtime net of GC, and the GC time.
;;; The division by 1000000. converts the raw counts to seconds, i.e. the
;;; clocks report microseconds (on SAIL, RUNTIME is EBOX time -- see the
;;; accompanying message).
(defun test ()
 ((lambda (t1 x gt)
	  (setq x (pairs a b () 'equal () () ()))
	  (setq t1 (- (runtime) t1))          ; elapsed run time, usec
	  (setq gt (- (status gctime) gt))    ; elapsed GC time, usec
	  (print (length x))                  ; number of pairings (2592)
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 1000000.)))
	  (print (list 'gctime
		       (quotient (float gt) 1000000.))))
  (runtime) ()(status gctime)))

∂06-Apr-81  2007	RPG  
To:   lisptranslators at SU-AI   
Here's the real timing stuff. As I pointed out, the old RUNTIME
(which is what RUNTIME in MacLisp here gives) is EBOX time (excluding
all memory time). Also, COMPILE the functions. The initial run
on SAIL does many CORE UUOs, which are some page creation stuff on ITS,
so it may run very slowly. The total runtime here at SAIL is given
by adding the GCTIME and WTIME entries below. 

This program is henceforth called: ``SAIL constraint combinatorial pairing
program'' or SCCPP.
			-rpg-
RUNTIME = EBOX time
WTIME = EBOX  + memory time (approx.)

2592 			;number of pairings
(RUNTIME 1.969) 	;EBOX time in seconds
(GCTIME 29.914) 	;GCTIME in seconds
(WTIME 25.8693333) 	;EBOX + MEMORY time in seconds

2592 
(RUNTIME 1.903) 
(GCTIME 2.616) 
(WTIME 5.334) 

2592 
(RUNTIME 2.008) 
(GCTIME 2.61) 
(WTIME 5.59000003) 

2592 
(RUNTIME 1.959) 
(GCTIME 2.065) 
(WTIME 5.86833334) 

2592 
(RUNTIME 1.904) 
(GCTIME 1.921) 
(WTIME 5.1623333) 

∂05-Apr-81  0208	H at MIT-AI (Jack Holloway) 	lisp timings    
Date:  5 APR 1981 0508-EST
From: H at MIT-AI (Jack Holloway)
Subject: lisp timings
To: rpg at SU-AI

Out of curiosity, you might try to get Craig Reynolds at III
to run some timings of Maclisp on the Foonly F-1.  The machine
is roughly 2 to 2.5 times a KL-10 for some things.
I'm not sure he has a net address, but you could get
in contact with him thru Dave Dyer at ISI.

∂06-Apr-81  1410	HEDRICK at RUTGERS 	Re: Timing benchmark     
Date:  6 Apr 1981 1701-EST
From: HEDRICK at RUTGERS
Subject: Re: Timing benchmark   
To: RPG at SU-AI
In-Reply-To: Your message of 6-Apr-81 1602-EST

Any chance I could get you to stick to functions defined in McCarthy?
I can guess what most of them are, but it would be better not to have
to guess.  If you would like to send me a copy of the Maclisp manual
(if there is a Maclisp manual), that would be OK, too.

Also, when you say E-box time, what do you mean?  If you mean E-box
ticks, converting those to time is non-straightforward.  What conversion
do you use?  If that is what you want, it will favor Elisp, since
using E-box ticks will eliminate the overhead due to pager refills,
which is what slows us down compared to non-extended Lisp.

I will try to figure out your functions and run them tonight.  However
I do not keep separate GC timing yet, so you probably won't get that.
-------

1. I think it is specifically a bad idea to stick to McCarthy functions
because we are doing ``real programs''. For each non-standard function
we can provide a definite semantics; if your (generic) LISP cannot deal with
it, then that is good specific knowledge.

2. It is exactly the EBOX ticks, ignoring memory time. But we don't
page (or swap) so this isn't too outrageous. Currently there is
only one very informal way to measure memory time, which I will
include later on in the real timing results: what I sent was
simply an indication for you to judge whether your LISP is
running the problem correctly.

3. We will need to devise a separate GC time test. I provided it because it
was a superset of the information I'd like to get from all of you.
			-rpg-
∂06-Apr-81  1931	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: Timing benchmark
Date:     6 April 1981 2147-est
From:     Bernard S. Greenberg       <Greenberg at MIT-Multics>
Subject:  Re: Timing benchmark
To:       RPG at SU-AI
Cc:       "@LSPTRN.DIS[P,DOC]" at SU-AI

I ran this program three times on MIT-Multics. Here's what it said.

Runtime  (secs)   86.4  86.4  86.8
gctime   (secs)    2.4  3.8    2.5     (41 users out of max
                                        set at 120)

Before we accept that this implementation is really 60 times
slower than MACLISP on SAIL, I would like to point out that
the unmodified program, sent in the mail, also ran on AI-ITS
MACLISP, also compiled, for FIVE SOLID WALL-CLOCK MINUTES (as
it did on Multics) without opening its mouth, but I quit it
before it finished on AI (several times). The KA-10 is reported
to be 2 or 3 times slower (instruction rate) than Multics.
The KL-10 at sail, given all benefit of the doubt, is NOT
TWO ORDERS OF MAGNITUDE faster than the AI KA-10.

Has anyone else encountered difficulty in making this program run
in small number of seconds of CPU time?  Perhaps there is some
subtle conversion problem I missed in my perusal of this program?
(I made no modifications other than converting the characters to
lower-case, and on Multics, macroing *catch/*throw to catch/throw).

∂06-Apr-81  2008	HEDRICK at RUTGERS 	Re: Timing benchmark     
Date:  6 Apr 1981 2307-EST
From: HEDRICK at RUTGERS
Subject: Re: Timing benchmark   
To: RPG at SU-AI
In-Reply-To: Your message of 6-Apr-81 1602-EST

I translated your benchmark to R/UCI Lisp.  The only non-obvious
translation was due to the fact that in R/UCI Lisp (as in all Lisp 1.6
derivatives) there can only be 5 arguments to compiled functions.
Fortunately the arguments beyond the 4th were always NIL in your case,
so I just eliminated them.  (R/UCI Lisp has a special kind of function,
as LEXPR, that allows more than 5 arguments.  However its use did not
seem necessary in this case.)

Totals, including GC:

R/UCI Lisp (with free space expanded by 50K):
  interpreted:  15.0
  compiled:      4.6
  ", NOUUO:      3.2  (of which .6 is GC)

By NOUUO I mean that linkage between compiled functions is by direct
PUSHJ, not going through the interpreter.  This makes tracing and
breaking impossible.  (In Elisp we will use a protocol that does not
have this problem.)  Note that total runtimes are slightly better than
RPG's MACLisp timings.  However more of it is runtime and less is GC.
I conjecture that this will be typical of Lisp programs whose only
arithmetic involves small integers.  MACLisp will produce better
code for small integers, but will have to box and unbox them when returning
from functions or putting them into data structures, causing faster
runtime but more GC time. The first call is no different than others
because in R/UCI Lisp there is no automated expansion.  We had to
explicitly expand free storage by 50K before running.

Elisp (extended addressing Lisp) does not yet have a compiler.
Therefore some of the system functions (e.g. MEMBER, CADR) are running
interpreted.  This slows things down noticably.  To get an idea of how
Elisp is going to work out, I compared it with a copy of R/UCI Lisp in
which the same functions are being interpreted.  [The temptation is
great to simply code these things in assembly language, since there are
really only a few at this point.  However I will attempt to resist this
temptation and continue to compare Elisp with this semi-interpreted
R/UCI Lisp.]  Elisp uses dynamic expansion and contraction of memory.
However there is no apparent difference between the first time and
other times (possibly because memory has contracted to its initial
state by the end).

Elisp:
  interpreted:  28.5  (E-box time, as used by RPG, 20.1 sec.)

R/UCI Lisp (with the same functions interpreted):
  interpreted:  32.6

So Elisp is (as usual) a small amount faster than R/UCI Lisp.  This
suggests that a good prediction for the final version of Elisp is 14
sec. interpreted.  I am not ready to make predictions for Elisp compiled
code, as we don't know how the pager refill problem is going to affect
it.  My guess is that it will be slightly slower than R/UCI Lisp, with
slowdown due to pager refills (from extended addressing) somewhat offset
by the better code generated by Utah's compiler.

Note that I normally report "normal" CPU time, not E-box times as used
by RPG. The E-box times will be noticably smaller in the case of Elisp.
I regard the use of E-box time with Elisp as problematical, since it
is significantly smaller than conventional CPU time, even with 0 load
on the system.  I think this shows that E-box time omits some sort of
overhead that Elisp generates, probably pager refills (though the
documentation says that refills are not counted).  Until someone convinces
me otherwise, I will regard conventional CPU time as a better index of
Elisp's load on the system.  I report CPU times with fairly light (1 to 2)
load averages.

-------

∂06-Apr-81  2007	RPG  
To:   lisptranslators at SU-AI   
Here's the real timing stuff. As I pointed out, the old RUNTIME
(which is what RUNTIME in MacLisp here gives) is EBOX time (excluding
all memory time). Also, COMPILE the functions. The initial run
on SAIL does many CORE UUOs, which are some page creation stuff on ITS,
so it may run very slowly. The total runtime here at SAIL is given
by adding the GCTIME and WTIME entries below. 

This program is henceforth called: ``SAIL constraint combinatorial pairing
program'' or SCCPP.
			-rpg-
RUNTIME = EBOX time
WTIME = EBOX  + memory time (approx.)

2592 			;number of pairings
(RUNTIME 1.969) 	;EBOX time in seconds
(GCTIME 29.914) 	;GCTIME in seconds
(WTIME 25.8693333) 	;EBOX + MEMORY time in seconds

2592 
(RUNTIME 1.903) 
(GCTIME 2.616) 
(WTIME 5.334) 

2592 
(RUNTIME 2.008) 
(GCTIME 2.61) 
(WTIME 5.59000003) 

2592 
(RUNTIME 1.959) 
(GCTIME 2.065) 
(WTIME 5.86833334) 

2592 
(RUNTIME 1.904) 
(GCTIME 1.921) 
(WTIME 5.1623333) 

∂07-Apr-81  0924	RPG  	Rules    
To:   lisptiming at SU-AI   
I have sent out the first benchmark, and already there are a number
of issues that need to be faced. First is that some systems (SAIL,
for instance) only reliably report EBOX time (excluding memory
time). Fortunately SAIL does make available some form of EBOX + MBOX
time, though it is unreproducible. When possible I want to see the most
information you can give me with as many distinctions as possible. If your
system can give a breakdown of memory references, cache write-through time,
page fault time, EBOX time,... please give me that. When I send out the
benchmarks I include the SAIL times so that you can get some idea of how
long it all takes. From now on I will provide EBOX time, EBOX + MBOX time,
and GCTIME. Because I only provide that does not mean that is all I want to
see.

Slightly more importantly is the issue of `cheating'. If the sources of 
benchmarks wish to allow specializations of their programs to the test data,
they should make remarks to that effect. If someone cares to make such 
specializations they must be cleared by the author and me. This isn't because
I like to be in control so much as I want to understand what is being gained
by the specialization and what features of the target LISP make such 
specializations necessary and/or desirable.

For example, in the first benchmark several of the functions are implicit 
LEXPRs, which in MacLisp means that there are more than 5 arguments. This
means that the arguments are passed on the stack rather than through registers.
Since this takes longer than the register convention (in this case) I want that
feature timed. In the test data I sent out, some of the arguments are provably
constantly (). Chuck Hedrick at Rutgers (cleverly) noticed this and specialized
the functions. I want to specifically disallow that specialization (since the
LISP he had allows LEXPRs). [So do it again, Chuck.]

			-rpg-

∂07-Apr-81  1323	JONL at MIT-MC (Jon L White) 	Proposed ''mini'' benchmark, with interpretation. 
Date:  7 APR 1981 1611-EST
From: JONL at MIT-MC (Jon L White)
Subject: Proposed "mini" benchmark, with interpretation.
To: lisptiming at SU-AI

It hardly seems appropriate to run timing tests without including a word
or two about the discussion last fall which generated (in Masinter's words) 
so much more "heat" rather than "light", namely the TAK function sent my way 
in September 1980 by Mr. Shigeki Goto of the Electrical Communication 
Laboratories, Nippon Telegraph and Telephone Co., in Tokyo.

  ;; TAK -- Goto's triply-recursive benchmark function.  Pure integer
  ;; recursion driven by GREATERP and SUB1: returns Y as soon as X is not
  ;; greater than Y, otherwise recurs on three rotated argument triples.
  ;; The TARAI-4 test case below times (TAK 4 2 0).
  (DEFUN TAK (X Y Z)
	(COND ((GREATERP X Y)
	       (TAK (TAK (SUB1 X) Y Z) 
		    (TAK (SUB1 Y) Z X)
		    (TAK (SUB1 Z) X Y) ))
	      (T Y) ))
  The test case, named TARAI-4, is to measure the timing of (TAK 4 2 0)

The value of this trivial function can be seen, not in a competition
between lisps for "speed", nor in a  condemnation of one dialect for 
the "kludges" which must be performed in order to get such a trivial
thing to run reasonably fast, but rather in the analysis of the basic
issues which trying to time it brought out.  After receiving many responses 
from around the community,  I mailed out a note in which was discussed
what I thought were some fundamental issues, and I'd like to send that
note to this group for consideration.  The original note from Mr. Goto and 
a lengthy series of communications about the timings for his test case, 
especially from people in the Interlisp community, is in the file 
  JONL;GOTO NOTE
on the MIT-MC machine.

  Date: 22 October 1980 11:08-EDT
  From: Jon L White <JONL at MIT-MC>
  Subject: Response to Goto's lisp timings
  To: .  .  . 
      As Larry Masinter mentioned in his comment on the Goto
  timings, comparisons between LISPs are likely to generate 
  more heat than light;  but the replies did throw a little
  more light on some things, especially the runtime variabilities
  of an Interlisp program, and I thought I'd summarize them 
  and pass them along to the original recipients of the note.
  However, I'd like to say that the general concern with speed 
  which I've  encountered in the past has been between MacLISP and 
  FORTRAN, rather than with some other lisp;  and several Japanese 
  research labs have done AI research still in FORTRAN.  
      Just in case you're put off by looking at even more meaningless
  statistics, I'd also like to apprise you that following the little 
  summary is a brief technical discussion of three relevant points 
  disclosed by the TAK function (out of the many possible points at 
  which to look).  These technical points may be new to some of you, 
  and even beyond the LISP question you may find them useful; the key 
  words are (1) UUOLINKS, (2) Uniform FIXNUM representation, and 
  (3) Automatic induction of helpful numeric declarations by a compiler.

      Almost everyone familiar with Interlisp recognized that
  the ECL people had not requested "block" compilation in the TARAI-4
  example, and several persons supplied results from various
  20/60's around:
				   default compilation 	rewritten code, with
	 correspondent 		       timings 		block-compiled timing
     Date: 19 OCT 1980 2127-PDT		  9.8ms		    1.8ms
    From: MASINTER at PARC-MAXC2
     Date: 20 Oct 1980 1039-PDT		  16.ms		    2.ms
    From: CSD.DEA at SU-SCORE (Doug Appelt)
     Date: 20 Oct 1980 at 2257-CDT	   0.83ms    (for UCILISP only)
    From: tyson at UTEXAS-11 
    <Goto's original timings on ICILISP>     2.9ms
    <Goto's original timings on Interlisp>  15.0ms
    <myself, for MacLISP on 20/50>	   0.556ms

  There seems to be some unexplained discrepancy between Tyson's timing 
  and that of Goto, as well as between Masinter's and Appelt's default-
  compilation timings; but the "best-possible" Interlisp timings for
  a re-written function (replacing GREATERP by IGREATERP) and using
  the "right" block declarations seem consistent at around 2ms.  Indeed,
  as Shostack suggests in his note of "20 Oct 1980 1036-PDT" there is
  quite a bit of variability in timing Interlisp functions depending on
  just the right set of declarations etc (even for such a simple function).
      A point which, however, seems to be missed is that the notion of
  "block" compilation requires a decision at compile-time as to what 
  kind of function-linkage would be desirable (I presume that spaghetti-
  stack maintenance is the worst offender in runtime slowdown here);
  by comparison, the decision between fast and slow function linkage
  in MacLISP is made dynamically at runtime, so that only one kind of
  compilation is needed.  Indeed, by not burdening the novice with the
  understanding of yet one more inscrutable compilation question
  ("block" versus what?), the novice needn't be unduly penalized for
  not becoming a "hacker"; the above timings show a penalty of a factor 
  between 5 and 10 for ignoring, or under-utilizing, the "block" question.  


  (1) UUOLINKS:
      The following strategy, which we call the UUOLINKS hack, may have 
  first been introduced into the old LISP 1.6:  
      Arguments are set up for passing to a function and an instruction 
	  in a specially-managed hash table is XCT'd.
      In case a fast link is acceptable, the first usage of this linking
	  will convert the hash entry to a PUSHJ P,...  --  if not
	  acceptable, it will remain a slow interpretive route.
      Two copies of the hash-table are kept -- one is  never altered by
	  the linker, so that at any given point in time, all the "fast"
	  links may be restored to the unconverted slow interpretive route
	  (which may yet again be "snapped" to fast).
      Typically, a hash table size of 512. is satisfactory, but some
	  applications require 1024. or more (in particular, MACSYMA).
  Indeed as Boyer (@SRI-KL) mentioned in his note of "21 Oct 1980 2055-PDT", 
  the fast route -- whether by Interlisp's block compiler, or by MacLISP's
  runtime "snapper" -- does not leave much debugging help lying around
  on the stack; at least with this UUOLINKS approach, one can make the
  decision while in the middle of a debugging session, without penalty.
  The time cost of using the slow linkage seems to be a factor of between
  2 and 5.

  (2) Uniform FIXNUM representation
      Many years ago we changed MacLISP's representation of FIXNUM so
  that it would be uniform;  unlike the other PDP10 lisps with which I
  am familiar, we do not code some fixnums (small ones) as "immediate" 
  pointers and others (larger ones) as addresses.  Also, there is a
  read-only page or two which "caches" fixnum values of about -300. to
  +600., so that number consing of small numbers won't actually be 
  allocating new cells; e.g. interpreting a form like 
	  (DO I 0 (ADD1 I) (GREATERP I 100.) ...)
  Although I took a lot of flak for flushing the INUM scheme in favor
  of the uniform scheme, consider the advantage for compilation strategy,
  as seen in these representative code sequences for (IGREATERP X Y):
    INUM scheme:		MOVE A,-3(P)
			  JSP T,UNBOX
			  SAVE TT,somewhere
			  MOVE A,-4(P)
			  JSP T,UNBOX
			  CAME TT,somewhere
			  ...
    Uniform scheme:	MOVE TT,@-3(P)
			  CAME TT,@-4(P)
			  ...

  (3) Automatic induction of helpful numeric declarations by a compiler.
      As Masinter and Boyer pointed out, most Interlisp programmers 
  would probably be using "IGREATERP" rather than "GREATERP" (the MacLISP 
  correspondent is "<" ).  But a compiler can accumulate static information
  and do this change automatically;  at the very least, it could give
  out warning checks such as "Floating-point argument used with FIXNUM-only
  operation".  Providing the capability for compile-time typing of variables
  is probably the only way to meet the FORTRAN challenge -- which must be
  met since much useful AI research needs both symbolic and numeric
  capabilities.  Larry's MASTERSCOPE is a very similar sort of automated
  induction scheme.
 

∂10-Apr-81  1051	HEDRICK at RUTGERS 	Re: Rules      
Date: 10 Apr 1981 1301-EST
From: HEDRICK at RUTGERS
Subject: Re: Rules    
To: RPG at SU-AI
cc: lisptiming at SU-AI
In-Reply-To: Your message of 7-Apr-81 1224-EST

OK, but you will notice that I was also the only person who got the
thing done by the deadline you specified.  If we are going to end up
doing major conversions for each test, and furthermore if conversions
are going to have to be approved by you, you may find fewer volunteers
than originally planned. The reason for eliminating the extra args was
of course that turning the things into LEXPR's would be a pain in the
neck.  This is because in UCI Lisp LEXPR's do not refer to arguments by
name, but as (ARG x).  I can obviously write a program to do this, but
that was not feasible in the amount of time I had before the meeting.
Furthermore, it looked to me like the functions that had more than 5
arguments were used for preparing the data, but that only PAIRS1
and PAIRS2 were actually called large numbers of times.  Now that the
issue has been brought up, I will of course test LEXPR's, but would
be surprised if there is any change in performance.

Even if LEXPR's change the performance, I am not sure that is the right
way to do the conversion to UCI Lisp. It would be very unusual for a UCI
Lisp programmer to use an LEXPR in order to handle more than 5
arguments. They are normally used for indefinite arguments, when it
makes sense to number them.  If named variables are replaced with (ARG 1),
(ARG 2), ..., this obviously makes the program somewhat opaque.  Another
possible method is to lambda-bind variables to the required values and
refer to them globally.  While this is less than ideal, I claim that it
is better than (ARG n).  Making a list of the extra arguments is also
possible, but (CADR ARGS) is little better than (ARG 2).

At this point we start having to ask what the purpose of this project
is.  If it is to see how the dialects vary, then I claim that nothing is
accomplished by forcing us to convert into code that we would not in
fact write that way in the dialect. It seems to me that it is perfectly
legitimate for me to say that UCI Lisp simply does not support EXPR's
with more than 5 arguments, and that I would find some other way to do
my task.
-------

Groundrules (reprise)
I believe that at the outset of this project I stated what my goals
were, and the order of importance. But I will reiterate them anyway:

1. I want to provide for each major type operation (variable lookup,
function call,...) a relative, hopefully total, order on the various LISPs
and LISP systems around. I also hope that there is enough standardized
timing facilities around to at least get some idea of the approximate
absolute speeds as well.

2. I want to determine, myself, what the qualitative differences between
the various LISP systems around are. I hope to do this by watching how various
programs and constructs are handled by the translators. At first I hoped
that the ``translator volunteers'' would be just that, but now it seems I
will need to do most of the translations myself, which is ok as long as
I can merely provide the framework and not exact working code. If you
want a NIL/MacLisp person to propose the exact program whose timing is
universally reported as the performance of your favorite LISP system, then
I might be more willing to do everything myself.

3. Having ``rules'' is absolutely fair. First, one certainly cannot look
at specializations of a program to the data. Moreover, innate laziness (no
`major conversions' to quote Hedrick) dictates that the programs should be
examined as little as possible. Arguing style is totally irrelevant.
Suppose I wanted to test function call time and proposed factorial in its
recursive form, it is totally opposed to the spirit of what that tests to
translate it into an iterative program no matter what absolute style
considerations you bring to bear. One of the things I wanted to test 
with this program is how functions with more than 5 arguments behave
and are handled with different systems. This is totally fair.

As pointed out, the standard > 5 argument LEXPR conversion is

	(defun foo (a1 ... an) ...) =>
	(defun foo n ((lambda (a1 ... an) ...) (arg 0) ... (arg n)))

I was flip with Chuck because we spent 2 delightful years at Illinois
together a few years back.

4. Perhaps I should have sent out more help with this program, and
in the future I will, but another point of this benchmark was to test
the testing system. 

From now on I will provide with each benchmark a description of each
`primitive' in the program(s) that is not in McCarthy along with translation
tips for those facilities that are not universal. As time goes on and I
become truly familiar with all the systems, I will provide specific help
for each LISP.
			-rpg-
∂10-Apr-81  1205	George J. Carrette <GJC at MIT-MC> 	Rules    
Date: 10 April 1981 14:19-EST
From: George J. Carrette <GJC at MIT-MC>
Subject:  Rules
To: HEDRICK at RUTGERS
cc: lisptiming at SU-AI, RPG at SU-AI


I too was surprised at the chastising about breaking rules that
went on here considering its relevance to that classic law of programming,
"if a function has more than three arguments then they are
 probably in the wrong order."

In point of fact, all the common so-called "lexprs," PROGN,
TIMES, PLUS, LIST, are specially handled by the compilers
of the lisp systems I'm familiar with.
Furthermore, the maclisp and lispm programs that have
used user-defined multi-argument constructions have invariably
developed in the direction of passing arguments by "keywords"
where these keywords are pre-processed in some form or another
at compile-time.

Ah, there are some interesting possible ways of optimizing
keyword argument calling in VAX NIL. Maybe we can talk about
things like this at some time.

-gjc

∂11-Apr-81  1001	CSVAX.jkf at Berkeley 	result of pairs benchmark on franz.  
Date: 11 Apr 1981 09:56:26-PST
From: CSVAX.jkf at Berkeley
To: rpg@su-ai
Subject: result of pairs benchmark on franz.

							pair benchmark results
							submitted by j foderaro
							(csvax.jkf@berkeley)
							10 april 81

Here are the results of running the pairs benchmark on Franz Lisp on 
a VAX 11/780 running Berkeley 4BSD Unix.  The load average was less
than one when the timing was done.  Timing on Unix is done in 60ths of
a second and the time charged to a process includes some of the system
overhead for memory management.  I ran the benchmarks on an unloaded
system to reduce the interference of the memory manager.

The program was run with modification only to the test function
shown below.  Perhaps you should in the future use macros like (cpu-time) 
and (gc-time) and each site can define these macros.

(defun test ()
 ((lambda (t1 x gt)
	  (setq x (pairs a b () 'equal () () ()))
	  (setq t1 (- #-Franz (runtime) 
		      #+Franz (car (ptime)) 
		      t1))
	  (setq gt (- #-Franz (status gctime)
		      #+Franz (cadr (ptime)) 
		      gt))
	  (print (length x))
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 #-Franz 1000000.
				 #+Franz 60.)))
	  (print (list 'gctime
		       (quotient (float gt) 
				 #-Franz 1000000.
				 #+Franz 60.)))
	  #+Franz (terpri))
  #-Franz (runtime) #+Franz (car (ptime)) 
  ()
  #-Franz (status gctime)
  #+Franz (cadr (ptime))))


---
The size of the compiled file is 2768 bytes.

Here are the results:

Script started on Fri Apr 10 22:16:58 1981
Reval: lisp
Franz Lisp, Opus 34
-> (load 'pairs)
[fasl pairs.o]
t
-> (test)
2592(runtime 7.033333333333333)(gctime 23.53333333333333)
nil
-> (test)
2592(runtime 7.383333333333333)(gctime 4.816666666666667)
nil
-> (test)
2592(runtime 7.283333333333333)(gctime 4.366666666666667)
nil
-> (test)
2592(runtime 7.333333333333333)(gctime 4.666666666666667)
nil
-> (exit)

------
I looked at which functions were being called and it seems just about all this
benchmark does is call 'member' 10,000 times.  I noticed that in our system
'memq' would do as good a job as 'member' so I replaced member by memq
and ran it with these results:

Reval: lisp
Franz Lisp, Opus 34
-> (load 'memqpairs)
[fasl memqpairs.o]
t
-> (test)
2592(runtime 1.683333333333333)(gctime 23.55)
nil
-> (test)
2592(runtime 1.733333333333333)(gctime 4.833333333333333)
nil
-> (test)
2592(runtime 1.766666666666667)(gctime 4.35)
nil
-> (test)
2592(runtime 1.783333333333333)(gctime 4.7)
nil
-> (exit)
script done on Fri Apr 10 22:21:50 1981

∂13-Apr-81  1320	RPG  
To:   lisptranslators at SU-AI   
First, in SCCPP there are functions with 7 arguments. For example,
the first function starts out:

(DEFUN PAIRS 
       (X Y MUST-APPEAR FUN APPLY-CONSTRAINTS CONSTRAINTS
	  NIL-PAIRS) ...)

I suggest the following translation:

(DEFUN PAIRS n
       ((LAMBDA (X Y MUST-APPEAR FUN APPLY-CONSTRAINTS CONSTRAINTS
		  NIL-PAIRS) ...)
	(ARG 1)(ARG 2)(ARG 3)(ARG 4)(ARG 5)(ARG 6)(ARG 7)))

(*list a1 ... an) => (cons a1 (cons a2 ...(cons an-1 an)))

(*catch x y) evaluates the form y. x should EVAL to a tag. If y returns
normally, the value of the *catch is the value of y. If the evaluation
of y entails the evaluation of a form like (*throw q v) where q EVALs
to the same tag that x did, then v is evaluated and the value of the *catch
is the value of v. Unless, there is an intervening *catch with the same
tag...

MAPCAN is MAPCAR with NCONC instead of CONS.

1+, +, < etc are FIXNUM versions of ADD1, PLUS, LESSP etc.

(FUNCALL fun x1 ... xn) evaluates all of its arguments and
applies the value of fun to the arguments x1 ... xn. So
(FOO a b c d) = (FUNCALL 'FOO a b c d)

			-rpg-

∂13-Apr-81  1239	RPG  	Groundrules (reprise)   
To:   lisptiming at SU-AI   
I believe that at the outset of this project I stated what my goals
were, and the order of importance. But I will reiterate them anyway:

1. I want to provide for each major type operation (variable lookup,
function call,...) a relative, hopefully total, order on the various LISPs
and LISP systems around. I also hope that there is enough standardized
timing facilities around to at least get some idea of the approximate
absolute speeds as well.

2. I want to determine, myself, what the qualitative differences between
the various LISP systems around are. I hope to do this by watching how various
programs and constructs are handled by the translators. At first I hoped
that the ``translator volunteers'' would be just that, but now it seems I
will need to do most of the translations myself, which is ok as long as
I can merely provide the framework and not exact working code. If you
want a NIL/MacLisp person to propose the exact program whose timing is
universally reported as the performance of your favorite LISP system, then
I might be more willing to do everything myself.

3. Having ``rules'' is absolutely fair. First, one certainly cannot look
at specializations of a program to the data. Moreover, innate laziness (no
`major conversions' to quote Hedrick) dictates that the programs should be
examined as little as possible. Arguing style is totally irrelevant.
Suppose I wanted to test function call time and proposed factorial in its
recursive form, it is totally opposed to the spirit of what that tests to
translate it into an iterative program no matter what absolute style
considerations you bring to bear. One of the things I wanted to test 
with this program is how functions with more than 5 arguments behave
and are handled with different systems. This is totally fair.

As pointed out, the standard > 5 argument LEXPR conversion is

	(defun foo (a1 ... an) ...) =>
	(defun foo n ((lambda (a1 ... an) ...) (arg 0) ... (arg n)))

I was flip with Chuck because we spent 2 delightful years at Illinois
together a few years back.

4. Perhaps I should have sent out more help with this program, and
in the future I will, but another point of this benchmark was to test
the testing system. 

From now on I will provide with each benchmark a description of each
`primitive' in the program(s) that is not in McCarthy along with translation
tips for those facilities that are not universal. As time goes on and I
become truly familiar with all the systems, I will provide specific help
for each LISP.
			-rpg-

∂13-Apr-81  1338	CLR at MIT-XX 	Re: Groundrules (reprise)     
Date: 13 Apr 1981 1634-EST
From: CLR at MIT-XX
Subject: Re: Groundrules (reprise)   
To: RPG at SU-AI
cc: pdl at MIT-XX
In-Reply-To: Your message of 13-Apr-81 1539-EST

Dear rpg,
	I would like to if possible participate in some of the LISP timing
tests using MDL on the 20.  Unfortunately, MDL is similar to LISP (especially
in spirit) but vastly different in detail.  In order to (for instance) run
your first proposed benchmark, a gross translation will be required.  As
starting points, MDL has no LAMBDA, PROGN, LIST*,MAPC, MAPCAN...  All of 
these things can be accomplished in MDL but in a vastly different way.

	My question is whether I should participate in the timings under
these circumstances or should MDL drop out due to extreme differences.

-Chris Reeve
-------

I would like to see what MDL has to say about these issues. I don't object
to having languages that are not recognizably LISP as long as they are
appropriate languages for AI research. ``Appropriate'', I think, means
at least being programming environment based as opposed to the sort
of batch based environment that PASCAL-like languages encourage. In any event
at the minimum send me your conception of the first benchmark.
∂13-Apr-81  1724	YONKE at BBND 	Re: Groundrules (reprise)     
Date: 13 Apr 1981 2022-EST
Sender: YONKE at BBND
Subject: Re: Groundrules (reprise)   
From: YONKE at BBND
To: RPG at SU-AI
Message-ID: <[BBND]13-Apr-81 20:22:10.YONKE>
In-Reply-To: Your message of 13 Apr 1981 1239-PST

Dick, I agree with your summary (or reiteration) of your last
message.  (I can always write a program that makes my lisp look
good and under the right conditions make the others look like
shit, e.g. DREVERSE on Interlisp-Jericho is faster than a speeding
banana, but other things may be slow.)

Sorry we didn't get a chance to talk at the lisp meeting -- maybe
next time.  I'm going to be out of communication for the rest
of this month (sailing in the Virgin Islands).  So apologies
for the lack of messages.

By the way, are the timings "official" in the sense of being
sponsored by an agency or is this your "pet project"?

Martin

The project is currently a siphoning from other Stanford money,
but McCarthy wants to get some machine time and a grad student
to do some of the correlation.
				-rpg-
∂13-Apr-81  1934	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: Groundrules (reprise)       
Date: 13 Apr 1981 2134-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: Groundrules (reprise)   
To: RPG at SU-AI
In-Reply-To: Your message of 13-Apr-81 1439-CST

Your "standard > 5 argument LEXPR conversion" is not correct in UCI-LISP
and, presumably, not in Hedrick's Lisp.  The compiler does not handle the
lambda with > 5 args whether it is a defined function or an internal
lambda.

Now I ask, where does your word "standard" come from?

Again, I'd like to point out that Hedrick's original program should be
ok.  He pointed out that he could not handle > 5 args and handled it in
a way that would not affect the timing in any significant way (I'd say
less than .1%).

By the way, I checked the LISP OARCHI file and found that the >5 args was
handled beginning in November 1972.


(While I was checking on that, I got your next message.  You still are thinking
that a compiler would have no trouble with 5 args to a Lambda even if it
can't handle 5 args to the Lambda of the function definition.  The answer, of
course, is to use nested LAMBDAs or a PROG and SETQs.  From the original
entry of 11/72 into LISP OARCHI it looks like MACLISP does the same thing when
it finds an internal LAMBDA with >5 args as it does with a function of >5 args.)
-------

Register/Stack Allocation of LAMBDA vars
It all depends on your compiler or at least on the level of technology
embraced thereby. The MacLisp compiler (and, I think, the InterLisp compiler)
both take internal LAMBDAs to mean a naming (or binding) of temporary
locations, normally on the stack but possibly in the registers. Modern
hardware (cache memories) blurs the necessity of register desirability
for speed of access. 

Also, register passing meant one didn't need to pass the number
of arguments as in the LEXPR case, where a stack passing protocol is
used, and where the adjustment of the stack is explicitly required.

Thus, there is a distinction between function interface LAMBDAs and
internal LAMBDAs in some compilers. Since there can be n active LAMBDA
variables at any one time, the need for stack allocation (binding)
is necessary in any compiler. The stack allocation, though, can be
done as a register save operation rather than as a pre-planned allocation,
so I guess you could have a reasonable compiler which register allocates
though I wouldn't have thought it was still done that way.
So, translate the > 5 case as best you can and tell me how you decided on it.
			-rpg-
∂13-Apr-81  2214	HEDRICK at RUTGERS 	Re: Groundrules (reprise)     
Date: 14 Apr 1981 0111-EST
From: HEDRICK at RUTGERS
Subject: Re: Groundrules (reprise)   
To: RPG at SU-AI
In-Reply-To: Your message of 13-Apr-81 1539-EST

The problem with your proposed paraphrase of expr's with > 5 args is
that it uses the construct that we are trying to avoid, namely a
lambda with > 5 args.  I am sure I can come up with something...

It seems that for your project to work, we are going to have to send
back the translation.  Else how can you judge all of what you want to
judge?  What I will probably do is build up a MACLisp conversion package
(assuming that many of the tests are in MACLisp - if you contemplate
doing each one in a different dialect things could get tense).  Any
preferences as to the form in which I send the translation? At this
point I can just send you the MACLisp package.  But as time goes on that
will get longer and longer and have less and less specific relevance to
any one test.  I do not contemplate rewriting the function in UCI Lisp,
but mostly defining the functions and then using the test in close to
its original form.  Possibly you will say that as long as I can implement
the constructs in the original, you don't really care to see how I do
it.  Is that the case?



-------

Misunderstanding
I guess what I said to Tyson (enclosed) is relevant. I was under the impression
that internal LAMBDAs could compile onto the stack directly, rather than
through a register-save operation. So my comments about the LEXPR conversion
simply did not apply to your system. But, I've learned something important
about your LISP, which is what I wanted to do.

From now on I hope to specify what it is that is tested with each benchmark
and be able to trust that you're doing the right thing, though I would like
to see the resulting code for the benchmark and/or the translation program.

What I may end up doing is to send out both the original benchmark
and my Maclisp translation to you in order to make things easier.

About LIST* (which I mistakenly called *LIST through a weird spoonerism)
I think we all ought to get straight which LISPs do macros, and when they
are to be used. For example, I think that LIST* is turned into the
CONS form and then compiled, so I recommend a macro version of it.

Attached message to Tyson:
Subject: Register/Stack Allocation of LAMBDA vars
It all depends on your compiler or at least on the level of technology
embraced thereby. The MacLisp compiler (and, I think, the InterLisp compiler)
both take internal LAMBDAs to mean a naming (or binding) of temporary
locations, normally on the stack but possibly in the registers. Modern
hardware (cache memories) blurs the necessity of register desirability
for speed of access. 

Also, register passing meant one didn't need to pass the number
of arguments as in the LEXPR case, where a stack passing protocol is
used, and where the adjustment of the stack is explicitly required.

Thus, there is a distinction between function interface LAMBDAs and
internal LAMBDAs in some compilers. Since there can be n active LAMBDA
variables at any one time, the need for stack allocation (binding)
is necessary in any compiler. The stack allocation, though, can be
done as a register save operation rather than as a pre-planned allocation,
so I guess you could have a reasonable compiler which register allocates
though I wouldn't have thought it was still done that way.
So, translate the > 5 case as best you can and tell me how you decided on it.
			-rpg-
∂21-Apr-81  1316	RPG  	SCCPP    
To:   lisptranslators at SU-AI   
I have only heard from SAIL, Multics, Franz, and Rutgers on the
first timing benchmark so far. Please send your results. To see
what people have done so far you can look at: 
	SAIL:RESULTS.TIM[TIM,LSP]
No password to FTP away or to TYPE it out.
			-rpg-

∂13-Mar-81  1959	MEEHAN at MIT-AI (James R. Meehan) 
Date: 13 MAR 1981 2132-EST
From: MEEHAN at MIT-AI (James R. Meehan)
To: RPG at SU-AI

I was "volunteered" into the LISP Timing Mailing List, but I'd like
to get off it if it requires reading the volumes of mail I've seen in
the last few days. 
  I'm in charge of UCI LISP and UCI MLISP (University of California at
Irvine) and I have more than a passing interest in LISP-related projects,
but my acount at MIT-AI is used perhaps once a week for checking mail
and messages, and [important] I do this at 300 baud. If you can send
some of the correspondence by US mail (as opposed to all the day-to-day
stuff), I'd be interested. 
  I should also put in a pitch for mentioning your project in the
SIGART Newsletter, which is read by about as large a LISP audience
as anything. 
 Cordially -  Jim Meehan

∂31-Mar-81  1615	Deutsch at PARC-MAXC 	Re: Timing Benchmarks  
Date: 31 Mar 1981 16:14 PST
From: Deutsch at PARC-MAXC
Subject: Re: Timing Benchmarks
In-reply-to: RPG's message of 31 Mar 1981 1451-PST
To: Dick Gabriel <RPG at SU-AI>

It is vitally important that timing tests NOT be limited to made-up code
fragments, but include systems which are in heavy use NOW.  We got burned
very badly on Interlisp-D because of completely erroneous ideas about what
things actually got used a lot.

Right, but I'm having trouble getting people to contribute benchmarks!
			-rpg-
∂21-Apr-81  1604	Greenberg.Symbolics at MIT-Multics 
Date:  21 April 1981 19:04 est
From:  Greenberg.Symbolics at MIT-Multics
To:  Dick Gabriel <RPG at SU-AI>
In-Reply-To:  Message of 21 April 1981 15:58 est from Dick Gabriel

Well, since I sent you these results, I have done some more experimentation.
The MIT-AI results can be written off as user load. Yes,
indeed, of course those files were compiled.   I am afraid that
I will have to stand by those numbers and conclude that consing
is DAMN SLOW.  The -10 can allocate a cons in 1 XCT; a routine of
some length is involved on Multics. Oh well. That's why I
work with Lisp Machines now.

∂07-Apr-81  1037	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: Rules        
Date:  7 Apr 1981 1230-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: Rules    
To: RPG at SU-AI
cc: hedrick at RUTGERS

Not being totally familiar with MACLISP, I didn't realize that MACLISP
automatically made LEXPRs out of EXPRs with 6 or more arguments.  UCI-Lisp
(which is presumably close to what Hedrick is running) does have LEXPRs
but does not provide for automatic conversion.

What I did, and I think it is valid, was to make the last three args into
a list.  Any argument with that technique?  I don't separate the list
except where I need it.

It appears to me that the major portion of the time is spent in PAIRS1
rather than in the sections of code that involve the >5-argument functions.
The difference in argument passing for them should be swamped by the
recursion in PAIRS1.   (I just ran a test and 97% of the time is spent in
PAIRS1 (interpreted).)
-------

∂07-Apr-81  1107	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Rules - GC time  
Date:  7 Apr 1981 1259-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Rules - GC time
To: rpg at SU-AI

The first times that I ran SCCPP, I arbitrarily chose to use approximately
30000 (decimal) free space.  The ratio of runtime versus GC time was
approximately the same as what you had supplied.  However I later ran it
with twice that space.  I was now getting exactly one GC per test (each
recovering approximately the same amount of space).  My total GC time
had dropped a factor of 3 1/2.

UCI-Lisp uses the standard mark and sweep routine.  Obviously there is
an overhead associated with each GC.  Furthermore, a GC during which
most of the core is tied up is more costly than one in which free space
is almost empty.  Thus GC time required for a problem is a hard point
to pin down.

If I were to set my free space up so that I totally filled memory, I
would have less than one GC per run on the average.  If I used more free
space than physical core would allow (not possible on our 20 but a problem
on our 10), I would swap GC time for paging time.  This would seem to be
unfair as paging time could be considered either the job's cost or system
overhead (like swapping).  On our 10, increasing core size to beyond
the physical space dramatically increases run time because of paging costs.

It might be a reasonable idea to specify a maximum amount of free space
(or should it be total core used?) in which a program can be run.  This
may not be possible for the Lisp machines.  An alternative idea would be
to adjust core size so that you get exactly one GC per test.  Suppose that
you start off with a fresh free space and run the problem, GCing only when
it was done.  This would count the cost to collect the words used once
without counting what it would cost to track through the temporary data
during the running of the program (which is dependent on when the GC
happens).   I feel this would be a reasonable comparison to the costs
associated with having a parallel GC or a machine with such a large
address space so as to never have a GC.
-------

∂07-Apr-81  2213	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	SCCPP on UCI-Lisp
Date:  8 Apr 1981 0001-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: SCCPP on UCI-Lisp
To: rpg at SU-AI

		Results of LISP timing tests for UCI-Lisp

(Times are in the form R+G where R is the runtime (not including GC time)
and G is the GC time.  All times are in seconds.  "Interp" means interpreted,
"Slow" means compiled but using UUO-links, "Fast" means compiled with the UUO
links replaced by direct jumps.)

				   Processor
Program			KL-2060			KI-1060
	   Interp	Slow	  Fast	     Interp	  Slow		Fast

SCCPP:
 Free:
  30000	 14.00+2.78  5.32+2.38	3.38+2.78   53.43+8.58  21.14+11.62 12.77+11.56
	 14.04+2.83  5.37+2.70	3.35+2.71		20.40+11.47 12.63+11.36
				3.34+2.70		20.72+11.50 12.71+11.44

  60000	 14.60+0.58  5.40+0.50	3.35+0.53   52.80+1.42	21.18+1.45  12.81+1.39
	 14.20+0.64  5.44+0.52	3.36+0.53		21.27+1.38  12.34+1.43
	 14.09+0.63  5.37+0.52	3.35+0.52		21.19+1.40  12.93+1.40
	 14.22+0.61  5.35+0.52	3.40+0.53

  Notes: The functions with more than 5 arguments were changed to 5 argument
functions by making the last 3 into a list.  These were involved in less than
3% of the run time so the difference is insignificant.  The timings on the
2060 were with a load average less than 1.  The timings on the KI-10 were with
a moderate load.  The differences in the various timing runs are probably due
to system overhead due to swapping.  The 30K free space examples had about 5
or 6 GC's while the 60K free space examples had one GC each.
-------

∂21-Apr-81  2018	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Lost mail⊗? 
Date: 21 Apr 1981 2217-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Lost mail⊗?
To: rpg at SU-AI

Enclosed is a set of all my messages to you.  Apparently some of them (in
particular the results I sent you just before the Lisp meeting) got lost.
I checked TIMING.MSG[TIM,LSP] and only found two msgs from me in there.
I also saw that one of them apparently had been replied to - but I never got
a reply.  I don't know what is going on, but please acknowledge receipt of
this.
 6-Apr-81 16:05:21-CST,582;000000000001
Date:  6 Apr 1981 1605-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: Timing benchmark   
To: RPG at SU-AI
In-Reply-To: Your message of 6-Apr-81 1502-CST

I suggest that each program submitted for everyone to run get a unique
name so we can describe it simply.   Also, please name the language
in which it was written.

Also, it wasn't clear from your message whether the code got compiled or
ran interpreted for the times you got.  (Only from looking at the code,
I'd guess it ran interpreted since there was nothing that seemed to compile
it.)
-------
 7-Apr-81 12:30:27-CST,911;000000000001
Date:  7 Apr 1981 1230-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: Rules    
To: RPG at SU-AI
cc: hedrick at RUTGERS

Not being totally familiar with MACLISP, I didn't realize that MACLISP
automatically made LEXPRs out of EXPRs with 6 or more arguments.  UCI-Lisp
(which is presumably close to what Hedrick is running) does have LEXPRs
but does not provide for automatic conversion.

What I did, and I think it is valid, was to make the last three args into
a list.  Any argument with that technique?  I don't separate the list
except where I need it.

It appears to me that the major portion of the time is spent in PAIRS1
rather than in the sections of code that involve the >5-argument functions.
The difference in argument passing for them should be swamped by the
recursion in PAIRS1.   (I just ran a test and 97% of the time is spent in
PAIRS1 (interpreted).)
-------
 7-Apr-81 12:59:51-CST,2112;000000000001
Date:  7 Apr 1981 1259-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Rules - GC time
To: rpg at SU-AI

The first times that I ran SCCPP, I arbitrarily chose to use approximately
30000 (decimal) free space.  The ratio of runtime versus GC time was
approximately the same as what you had supplied.  However I later ran it
with twice that space.  I was now getting exactly one GC per test (each
recovering approximately the same amount of space).  My total GC time
had dropped a factor of 3 1/2.

UCI-Lisp uses the standard mark and sweep routine.  Obviously there is
an overhead associated with each GC.  Furthermore, a GC during which
most of the core is tied up is more costly than one in which free space
is almost empty.  Thus GC time required for a problem is a hard point
to pin down.

If I were to set my free space up so that I totally filled memory, I
would have less than one GC per run on the average.  If I used more free
space than physical core would allow (not possible on our 20 but a problem
on our 10), I would swap GC time for paging time.  This would seem to be
unfair as paging time could be considered either the job's cost or system
overhead (like swapping).  On our 10, increasing core size to beyond
the physical space dramatically increases run time because of paging costs.

It might be a reasonable idea to specify a maximum amount of free space
(or should it be total core used?) in which a program can be run.  This
may not be possible for the Lisp machines.  An alternative idea would be
to adjust core size so that you get exactly one GC per test.  Suppose that
you start off with a fresh free space and run the problem, GCing only when
it was done.  This would count the cost to collect the words used once
without counting what it would cost to track through the temporary data
during the running of the program (which is dependent on when the GC
happens).   I feel this would be a reasonable comparison to the costs
associated with having a parallel GC or a machine with such a large
address space so as to never have a GC.
-------
 8-Apr-81 00:01:56-CST,1506;000000000001
Date:  8 Apr 1981 0001-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: SCCPP on UCI-Lisp
To: rpg at SU-AI

		Results of LISP timing tests for UCI-Lisp

(Times are in the form R+G where R is the runtime (not including GC time)
and G is the GC time.  All times are in seconds.  "Interp" means interpreted,
"Slow" means compiled but using UUO-links, "Fast" means compiled with the UUO
links replaced by direct jumps.)

				   Processor
Program			KL-2060			KI-1060
	   Interp	Slow	  Fast	     Interp	  Slow		Fast

SCCPP:
 Free:
  30000	 14.00+2.78  5.32+2.38	3.38+2.78   53.43+8.58  21.14+11.62 12.77+11.56
	 14.04+2.83  5.37+2.70	3.35+2.71		20.40+11.47 12.63+11.36
				3.34+2.70		20.72+11.50 12.71+11.44

  60000	 14.60+0.58  5.40+0.50	3.35+0.53   52.80+1.42	21.18+1.45  12.81+1.39
	 14.20+0.64  5.44+0.52	3.36+0.53		21.27+1.38  12.34+1.43
	 14.09+0.63  5.37+0.52	3.35+0.52		21.19+1.40  12.93+1.40
	 14.22+0.61  5.35+0.52	3.40+0.53

  Notes: The functions with more than 5 arguments were changed to 5 argument
functions by making the last 3 into a list.  These were involved in less than
3% of the run time so the difference is insignificant.  The timings on the
2060 were with a load average less than 1.  The timings on the KI-10 were with
a moderate load.  The differences in the various timing runs are probably due
to system overhead due to swapping.  The 30K free space examples had about 5
or 6 GC's while the 60K free space examples had one GC each.
-------
13-Apr-81 21:34:35-CST,1298;000000000001
Date: 13 Apr 1981 2134-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: Groundrules (reprise)   
To: RPG at SU-AI
In-Reply-To: Your message of 13-Apr-81 1439-CST

Your "standard > 5 argument LEXPR conversion" is not correct in UCI-LISP
and, presumably, not in Hedrick's Lisp.  The compiler does not handle the
lambda with > 5 args whether it is a defined function or an internal
lambda.

Now I ask, where does your word "standard" come from?

Again, I'd like to point out that Hedrick's original program should be
ok.  He pointed out that he could not handle > 5 args and handled it in
a way that would not affect the timing in any significant way (I'd say
less than .1%).

By the way, I checked the LISP OARCHI file and found that the >5 args was
handled beginning in November 1972.


(While I was checking on that, I got your next message.  You still are thinking
that a compiler would have no trouble with 5 args to a Lambda even if it
can't handle 5 args to the Lambda of the function definition.  The answer, of
course, is to use nested LAMBDAs or a PROG and SETQs.  From the original
entry of 11/72 into LISP OARCHI it looks like MACLISP does the same thing when
it finds an internal LAMBDA with >5 args as it does with a function of >5 args.)
-------
13-Apr-81 21:46:53-CST,435;000000000001
Date: 13 Apr 1981 2146-CST
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Timing msg on last msg from me
To: rpg at SU-AI

When I sent my last msg to you (a few minutes ago), I didn't realize that
I was getting messages then that you had sent some 7 hours earlier.
Our system was down so we were just getting the mail.  This should explain
my comment about getting a msg from you while I was typing that one in.
-------
-------

∂06-Apr-81  1204	RPG  
 ∂03-Apr-81  1531	ML   
 ∂02-Apr-81  2346	KASHTAN at SRI-AI 	Re: franzlisp   
Date:  2 Apr 1981 2348-PST
From: KASHTAN at SRI-AI
Subject: Re: franzlisp 
To: ML at SU-AI
In-Reply-To: Your message of 2-Apr-81 2301-PST

1) Does franzlisp pose any restrictions of its own on the address space
	available to a user?
		Yes and no.  Since franz lisp uses a segment based allocation
		scheme and a bitmap for marking data structures during garbage
		collection there are static arrays (for the bitmap and for
		segment descriptors) that place an upper bound on the size
		of the franz lisp address space.  It is not a hard limit,
		though.  Re-compiling the interpreter with larger static
		data structures will allow you to use a larger address space.
		I think the default maximum is around 5 Mbytes.  Where you
		are going to really run into trouble is in the virtual address
		space which Unix gives you.  There was a design flaw in the
		vax memory architecture (driven by the fact that VAXen had
		to originally run with only 128Kb memory) which made the page
		size very small (512 bytes).  To get around the horrendous size
		of page tables required for this DEC went to a 2 level paging
		scheme and paged the user's page tables. Unfortunately, UNIX
		does not page the user page tables.  The difference here is
		that while you would normally (if paging the page tables) only
		require 4 bytes of resident physical memory for every 64Kb of
		virtual memory -- on UNIX you require 516 bytes of resident
		physical memory for every 64Kb of virtual memory.  This adds
		up very quickly.  So you can pretty much rule out humungous
		(eg 128Mbyte) address spaces on your VAX for the time being.
2) Does it run reasonably fast (either under unis or unix), particularly
	the compiled code?
		If you are comparing compiled code to what you are used to in
		MACLISP, no (particularly for heavy arithmetic computation).
		We tried to use Franz Lisp for some Image Analysis stuff and
		it just was too slow to be usable.  There have been some
		recent fixes to the compiler to improve performance in array
		accessing and arithmetic computation but these have really
		not been sufficient for our purposes.  I think we are now
		betting on NIL (which should port to Unix quite trivially).
3) How difficult is it to write a driver for a device hanging off the
	unibus on a dr11-b(this is a dma device, we would be interfacing
	a grinell) for the operating system?
		Grinnell's on dr-11b's are absolutely trivial.  There are
		probably 1/2 dozen Unix drivers (all working) available to
		run your Grinnell through a dr-11b.  Your best bet would be
		Mike Accetta (accetta@cmua).
4) What are your general impressions of franzlisp as a workable system
	on a vax?
		If you are really interested in finding out about Franz Lisp
		as a system building tool on the VAX, I would suggest talking
		to Andy Witkin (witkin@sri-ai).  He is the resident MACLISP
		person (ex-MIT vision person) around here and has tried to
		use Franz Lisp as a MACLISP substitute on the VAX.


Sorry to be presenting such a generally dim view of the world.  The VAX is
just starting to mature as a research tool -- things are still kind of bare.
On the bright side,  you will definitely want to be using Gosling's EMACS for
the VAX.  A very winning editor!!

David
-------

∂14-Apr-81  2031	RPG  
To:   RPG at SU-AI, jonl at SU-AI
 ∂14-Apr-81  2022	HEDRICK at RUTGERS 	revised (final⊗?) version of report on Lisp conference 
Date: 14 Apr 1981 1237-EST
From: HEDRICK at RUTGERS
Subject: revised (final⊗?) version of report on Lisp conference
To: eis at MIT-AI, geoff at SRI-KL, boyer at SRI-KL, engelmore at USC-ISI,
To: dreifus at WHARTON-10
Redistributed-To: Bboard at SRI-CSL, bboard at SCORE, bboard at SAIL
Redistributed-By: GEOFF at SRI-CSL
Redistributed-Date: 14 Apr 1981

I have received a number of comments on my earlier report.  Here is a
revision that incorporates most of them.  It is still quite informal
in tone, and should probably not be circulated outside the ARPAnet
community.

---------------------------------------------------------------------
This is a slightly revised report on the Lisp conference at SRI.  It
has been modified to reflect some of the comments of people who read
the first draft. The conference was called by ARPA to discuss the future
of Lisp in the AI community, as well as the proposal for ARPA to buy
Dolphin computers for use within this community.  It lasted from 8:30 am
to about 10pm on 8 April.

One of the major concerns that motivated the meeting was a feeling that
there are suddenly many projects to reimplement Lisp, and that we may
end up with many half-baked, incompatible implementations.  There had
been some hope for getting some coherence among them.  As far as I can
see, these fears are somewhat justified, and little was accomplished
towards creating coherence.  There were in fact 13 Lisp implementation
projects listed.  (Some of these have been finished, however, so that
number is a bit too big.)  Fortunately, none of them are creating new
languages.  Rather they are reimplementations of existing dialects for
new hardware.  Thus there is somewhat less chaos than the number 13
would imply.  Here they are.  All results are publicly available
unless stated otherwise.

Interlisp:
   SRI portable Interlisp [SRI].  not yet funded.  projected to take 18
	months once it is funded.  They are thinking of the VAX, F-5, or
	any 32-bit machine that they can get funding for.
   Interlisp-D [Xerox].  For Dolphin, a Lisp machine with bit-mapped
	display.  Finished.
   Interlisp-Jericho [BBN].  For BBN's Jericho, a high-performance
	personal computer.  in progress, projected to be ready May,
	1982.  I believe results will be proprietary.
   VAX Interlisp [USC-ISI].  They hope to have the "virtual machine"
	(i.e. the lowest level of the interpreter) running by June, but
	it will be up to a year before the whole environment is working.
	This is the most critical of all the projects as far as most
	users are concerned. 
MACLisp:
   Lisp machine [MIT].  A version of Lisp extended to make heavy use of
	bit-mapped display, and having a number of new language
	features. Finished some time ago, but development continues.
	Results are proprietary, with MIT licensing two commercial
	companies to distribute them.
   NIL [MIT].  Intended to have everything that the Lisp machine has
	that will be useful on a conventional machine.  Also
	high-performance compiler.  Will have something by end of the
	summer, but development will be ongoing.  Mainly intended for
	VAX, but probably will be done for extended-addressing 20.
   S1-NIL.  NIL for the S1.  This is a multi-CPU supermachine sponsored
	by the military (Navy?).  Projected to be ready in about 2.5
	years.
   Spice Lisp [CMU].  Dialect of MACLisp for personal computers.  Will
	use microcode assist.  First implementation will be on VAX and
	extended addressing DEC-20 by simulating the proposed microcode.
	It is unclear whether the VAX and DEC-20 versions will be usable
	Lisps or not. Officially, they are intended mainly for debugging
	until the personal machine  hardware is available.  However they
	obviously have hopes that these will be usable in their own
	right. [see below for comments on this] Projected to be ready in
	early 1982.
   Franz Lisp [Berkeley].  MACLisp dialect for VAX.  finished.  Many
	people seem to be unenthusiastic about this, but it seems to be
	a solid implementation.  Maybe a trifle slower than it might be
	and somehow not as "sexy" as a typical MIT product.
Other dialects:
  Standard Lisp (Utah) - This is really a research project in
	portability. They are trying to write as much of Lisp as
	possible in Lisp. The compiler has extensions to allow it to be
	used for system programming.  Currently a very small part is
	still written in assembly language.  They should have an
	implementation for extended-address DEC-20 within 6 months.
  Elisp (Rutgers) - This is a recoding of R/UCI Lisp for extended
	addressing DEC-20.  This should be finished by the end of the
	summer.
  MDL [MIT] - This is not really a Lisp dialect.  It is intended as a
	successor to Lisp.  It has more data types and has been
	generally cleaned up.  they are working on a portable
	implementation.  There has been a DEC-20 implementation for
	years.  They now have an implementation that makes some use of
	extended addressing.  when the portable implementation is
	finished, it will be used to bring up a fully extended version
	for the DEC-20.  This is projected to be in 6 months.

Of all these, the project that generated the most interest was clearly
the VAX Interlisp.  Many people are committed to both the VAX and
Interlisp, and are in bad shape until this happens.

Now some comments as to why there are 13 projects.  I will not comment
much on the "other dialects".  MDL is its own thing, and a portable
implementation for it makes perfect sense.  Similarly, Utah's research
is a very interesting project.  In my opinion it is a more promising
approach than those being used by the Interlisp or MACLisp people, and
these folks would have been well advised to be cooperating more closely
with Utah than they are.  Our project is strictly a short-term fix to a
critical problem, and requires minimal effort.  That is its only
possible justification.  In the long run we will probably want to choose
a dialect that is going to be available on personal machines.

Now for the Interlisp and MACLisp projects.  They are all attempts to
implement existing languages for new hardware.  In the case of MACLisp
they also are trying to clean up and develop the language further.  

The Interlisp projects are coordinated with each other, at least in the
sense that they are all implementing the same language.  Also, much of
the Lisp code is shared.  However apparently the Interlisp "virtual
machine" (i.e. the part done in assembly language) is relatively large,
and the user interface (e.g. debugger) depends upon details of the stack
that are different for different machines.  Thus transporting Interlisp
is a fairly major project.  As far as I can see, the projects other than
SRI's were reimplementations of the virtual machine for particular
hardware, with no particular thought given to portability.  I think SRI
hopes to improve this.  The usefulness of this project will be affected
by who sponsors them, since certain funding arrangements could result in
a proprietary product.  There is a single manual that applies to all
Interlisp versions.  [This is more important than it sounds.]

The MACLisp projects are not particularly coordinated.  Each of them is
implementing different dialects, with separate (or non-existent)
manuals. In general the Lisp Machine seems to have the most features,
and the other dialects are probably more or less subsets of it.  Of the
current projects, there are considerable differences in style:
  Franz Lisp is the most conservative.  They wanted something up
	quickly, using existing technology.  It was done in C, which
	should help its transportability.
  Lisp Machine is the most radical.  First, it is standalone, with
	microcode support.  Second, it has every language feature one
	can imagine except for spaghetti stacks.  Finally, it supports
	the bit-mapped display.  They believe that many of its features
	could only be done on special-purpose hardware.  This might
	possibly be transportable to another microcodable machine with
	similar capabilities, though no particular thought was given to
	portability.
  Spice Lisp and NIL are in some sense compromises.  They are attempts
	at cleaning up the old language design, and taking many of the
	good ideas of Lisp Machine Lisp, but for somewhat more
	conventional machines.  At the moment these projects talk to
	each other, but are planning to implement somewhat different
	dialects.  They were strongly encouraged to talk to each other.
	They are both giving thought to portability, though SPICE is
	only intended to be portable among personal machines with
	certain capabilities.

The big question is, why so many projects?  As far as I can see,
here are the justifications:
  MDL - this is a new language design, with generally good ideas. It has
	been very influential on newer developments in Lisp.
  Utah - this is a research project in portability, and seems to be
	doing very good work.  In retrospect, the AI community would be
	much better off if Utah had decided to do their research using
	either Interlisp or MACLisp.  As it is, they have attempted to
	create a new dialect, and no one is interested.  Probably their
	work will be ignored, much to everyone's hurt, unless they
	decide to change to another dialect, which I do not expect.
  Franz Lisp and Elisp - these are projects to transport existing
	dialects to machines with lots of users that desperately needed
	the results. Elisp should die away eventually if other projects
	succeed. Franz Lisp may well survive.
  Interlisp - these projects are simply transporting Interlisp to other
	machines, which is always reasonable.  The only real criticism
	here would be that it is a shame that they can't develop
	technology to produce a new implementation more quickly.  But
	this is what SRI proposes to do.  In my opinion it is critical
	for that project to be done in such a way that the results are
	public.
  MACLisp - it is unfortunate that so much work is being done on MACLisp
	in an uncoordinated way.  There is some evidence that each
	project is made up of "true believers" who believe that there is
	little merit to the others.  We heard a somewhat amusing comment
	by one.  He said it wasn't true that there was chaos among the
	MACLisp people.  It was just that there were 4 different
	projects going in 4 different directions....

Everyone seems to believe that it is a good idea for there to be ongoing
work on both Interlisp and MACLisp.  Interlisp tends to be more
conservative in its development.  It has good user facilities and
well-coordinated support.  But there was a surprising consensus (that
included most of the Interlisp users) that
  - Interlisp is quite expensive compared to MACLisp
  - Interlisp as a dialect lacked a number of important features
	compared to MACLisp, and had no important features missing in
	MACLisp. Note that this comment refers only to facilities
	commonly used in programs, not to the user support environment.
	Many people believe that in user support Interlisp is better
	than MACLisp (except on the Lisp Machine, where MACLisp really
	shines)
Thus what was keeping people with Interlisp is
  - good user facilities
  - the fact that all implementations are compatible
  - good, complete documentation
  - good support

To the outside observer it appeared that in the long run MACLisp might
in fact take over if it could do the following:
  - supply the user facilities of the same power as Interlisp's, or
	document the fact that it already has them (if it does - I take
	no position on this question)
  - agree on a common language definition, with extensions for the Lisp
	Machine and others who need it
  - produce a complete manual, showing all the user facilities and being
	common among implementations.  A good form for this might be a
	loose-leaf binder, so that they could provide additional pages
	and chapters for the Lisp machine, and let you select which
	debugger and editor you wanted.
  - somehow assure users outside MIT that there was a central support
	organization that would respond to their concerns.  (This seems
	to be true, but there may be a PR problem.)
  - possibly do something to isolate non-MIT users from the hack-of-the
	week club, while preserving the fact that Maclisp will continue
	to develop more aggressively than Interlisp.  It is unclear
	whether there is a real problem here or a PR problem.

I am not convinced that the MACLisp folks can do this if left to
themselves. There was a proposal that ARPA might somehow be able to
cause it to happen.

Finally there is the question of what will happen next as far as
hardware acquisition.  We polled representatives of various user groups.
Most of them have DEC-10's and -20's.  They all say the PDP-10 is dead,
but are still doing most of their work on it.  However there are also a
significant number of people who use VAX as their primary system.  Some
of these are using Franz Lisp.  It seems fine.  But many are desperate
for VAX Interlisp.  Few people are currently using personal computers
for much, or even have definite plans to use them.  The main places that
are highly committed are
   MIT, where most Lisp work is using Lisp Machines
   BBN, which is committed to Jericho
   CMU, which is committed to Spice (in a somewhat indefinite future), 
   PARC, where most Lisp work is done with Dolphins and Dorados
   HPP(Sumex), which is committed to Dolphins
Others are hedging their bets with one or two experimental machines, or
are just plain waiting.

On the other hand, there are a number of complaints of a serious
shortage of cycles.  The most vocal is HPP/Sumex.  Thus there is some
pressure on ARPA to do something immediately.  HPP is pressuring them to
buy a large group of Dolphins.  There may also be pressure from
elsewhere, but it was not clear in this meeting.  A number of other
people have said that if ARPA does go this way, they will follow.  But
few people other than HPP feel strongly enough to commit to Dolphins
with their own money, unless ARPA does.  If ARPA does, then it will
become a de facto standard and no one wants to be left out.  People are
concerned that the Dolphin is a bit too slow (apparently about 1/2 the
power of a Lisp Machine). The feeling is that a year ago it would have
been a great idea, but its time may have passed.  The problem is that
there is no obvious other candidate.  People advised ARPA to look around
carefully to see if there wasn't something better.  One problem is that
whatever it is must support Interlisp.  Thus the Lisp machine will only
work if they will put Interlisp-D on it.  This might be technically
feasible, but the Lisp Machine people aren't interested in doing it.
(Whether the smell of that much money will arouse their appetite remains
to be seen.) The Jericho isn't quite ready, nor are any other personal
computers, though many are on the horizon.  The only other thing that
could be done quickly would be a single user VAX, which DEC might be
able to build if they would, or a small Foonly (a PDP-10 equivalent).
DEC had a single user PDP-10, which would be an ideal solution, but
decided not to build it.  The guess is that even ARPA isn't a large
enough customer to cause DEC to do something it otherwise wouldn't do.
If there is nothing better than the Dolphin, I don't have much of a
guess whether ARPA will get it or not. I think they may flip a coin.

The problem is that the proposed buy of Dolphins would be a large
commitment, and might make it hard for ARPA to take advantage of a
better system that may show up in a year.  It is not just that the
hardware will be expensive.  It is that by the time you add in the
software and hardware support, the total is 3 times the actual hardware
purchase price.

At that point the meeting ended, with no conclusion.
-------

∂22-Apr-81  1801	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Multics Timing results vindicated  
Date:     22 April 1981 2057-est
From:     Bernard S. Greenberg       <Greenberg at MIT-Multics>
Subject:  Multics Timing results vindicated
To:       RPG at SU-AI
Cc:       CHoffman.mal at MIT-Multics, HIC at MIT-MC,
    lisptranslators at SU-AI

Laugh not at Multics nor Multics Maclisp, but at a pretty
cruddy open-code implementation of MAPCAN.  Due to footwork
by Carl Hoffman and myself, we found our compiler generating
calls to NCONC in the code for mapcan! Needless to say, this
caused quadratic time expansion in list-searching! 
This was the cause of the 86-second run.  Recoding
mapcan as a macro keeping track of the end of the list cleverly,
I produced the following results of which I am no longer ashamed:

lisp
*
(setq base 10.)
10.
(load 'gabx)
t
(test)
2592.
(runtime 3.690232)
(gctime 2.478373)
t
(test)
2592.
(runtime 3.679003)
(gctime 3.930743)
t
(test)
2592.
(runtime 3.693353)
(gctime 2.650682)
t
(quit)

∂23-Apr-81  1232	RPG  	FRANZ Benchmark    (FRPOLY)
To:   lisptranslators at SU-AI   
Here, below, is the benchmark from Berkeley. It is in roughly
MacLisp syntax, but let me point out a few things about it.

First, DEFMACRO and the ` (backquote) syntax. DEFMACRO is
a mechanism for defining macros in MacLisp in which the form
is broken into named arguments, unlike standard MacLisp macros
which have exactly 1 argument which is the macro form itself (EQly
that form). The backquote syntax takes a form and produces code
to generate that form. A example helpe here:

	`(atom ,e) turns into (list 'atom e)
	`(signp e ,x) is (list 'signp 'e x)

Thus, , (comma) is the unquoting character.
For example, then, occurrences of (pcoefp x) in the code
below turn into (atom x) by the action of the macro
pcoefp. DEFMACRO provides a form which is substituted for
the calling form with arguments bound in the obvious manner.
Here is the equivalent standard MacLisp macro definition of
pcoefp:

	(defun pcoefp macro (x)
	       (list 'atom (cadr x)))

To run this benchmark interpretively, I suggest expanding the
macros once, either at read time or at first runtime. For those
who need it I can provide this file with macros expanded.

Another hack for defining these macros so that they are expanded
once only is:

(defun pcoefp macro (x)
  ((lambda (form)
    (rplaca x (car form))
    (rplacd x (cdr form))
    form)		   ;value of RPLACD assumed to be undefined
   (list 'atom (cadr x))))

LOCALF seems to be a declaration of LOCAL function names. For MacLisp
I've commented this out. SPECIAL means that there is a global
value cell and that binding is dynamic on that cell.

Here is what SIGNP does:

2) SIGNP IS NOW A FSUBR.  THE FIRST ITEM IN THE ARGLIST IS AN
INDICATOR FOR COMPARISON TO ZERO, E.G., (SIGNP LE N) IS NON-NIL
IF AND ONLY IF THE VALUE OF N IS A NUMBER LESS THAN OR EQUAL TO 
ZERO [SIGNP DOES NOT REQUIRE N TO BE OF NUMBER TYPE].  THE
INDICATORS FOLLOW THE PDP-10 ARITHMETIC COMPARISON INSTRUCTIONS, AND
SHOULD BE SELF EXPLANATORY:  L E LE GE N G 
[E means zerop, N means not zerop.]

(RUNTIM) and (STATUS GCTIME) return the number of microseconds of
total runtime and gctime. Note that gctime is included in
runtime in MacLisp.

There is a difference between `+' and `PLUS' in Franz, which is
that + takes 2 arguments, both fixnums (machine integers) and returns
a fixnum as its result. PLUS takes any number of any type of number and
returns the most appropriate type number. In the tests below, one of them
is designed to overflow the VAX machine integer range and drift into
BIGNUMs, which are any integer larger than the architecture supports. In MacLisp
and FRANZ there is a BIGNUM package that allows one to have contiguous
words of memory represent one number. So, beware of where there are +'s and
PLUS's. The same is true for - and DIFFERENCE, * and TIMES, / and QUOTIENT,
> and GREATERP, < and LESSP, etc. Generic arithmetic is closed compiled
while specific type is open coded.

(ODPP x) tests if X is odd.

= is numeric EQUAL.

PDIFFER1 is mentioned but not defined; is not called for these tests, however.

Here's my transcript of SAIL MacLisp:

(setup)
(Z 1 1.0 0 (Y 1 1.0 0 (X 1 1.0 0 1.0))) 
(bench 2)
(POWER= 2 (0.017 0.0) (0.017 0.0) (0.016 0.0)) 
(bench 5)
(POWER= 5 (0.116 0.0) (1.334 1.084) (0.15 0.0)) 
(bench 10)
(POWER= 10 (2.534 1.8) (19.733 17.151) (8.983 7.901)) 
(bench 15)
(POWER= 15 (16.65 8.832) (112.516 89.298) (63.9 56.749)) 

Which I ran compiled. Times are in seconds.

The following is the benchmark. 
			-rpg-


;;;; Benchmark Commences:

;;; Franz Lisp benchmark from Fateman
;; test from Berkeley based on polynomial arithmetic.

;; Dynamically-scoped globals: the PTIMES1/PTIMES2/PTIMES3 trio in
;; particular communicates through *X*, U*, and V rather than through
;; arguments, so these SPECIAL declarations are load-bearing.
(declare (special ans coef f inc i k qq ss v *x*
		    *alpha *a* *b* *chk *l *p q* u* *var *y*
		    r r2 r3 start res1 res2 res3))
;; LOCALF is a Franz-specific declaration of local function names;
;; other dialects should comment it out or ignore it.
(declare (localf pcoefadd pcplus pcplus1 pplus ptimes ptimes1
		 ptimes2 ptimes3 psimp pctimes pctimes1
		 pplus1))
;; Franz uses maclisp hackery here; you can rewrite lots of ways.
;; Variable ordering: compare the ORDER properties put on by SETUP.
(defmacro pointergp (x y) `(> (get ,x 'order)(get ,y 'order)))

;; A bare coefficient (anything that is not a (var . terms) cons).
(defmacro pcoefp (e) `(atom ,e))
;; SIGNP with indicator E tests "equal to zero" without requiring a
;; number; see the SIGNP note earlier in this message.
(defmacro pzerop (x) `(signp e ,x))			;true for 0 or 0.0
(defmacro pzero () 0)
;; Coefficient arithmetic deliberately uses the generic PLUS/TIMES so
;; that fixnum, flonum, and bignum coefficients all work.
(defmacro cplus (x y) `(plus ,x ,y))
(defmacro ctimes (x y) `(times ,x ,y))


(defun pcoefadd (exponent coeff rest)
  ;; Push one exponent/coefficient pair onto the front of the term
  ;; list REST; a zero coefficient contributes no term at all.
  (cond ((pzerop coeff) rest)
        (t (cons exponent (cons coeff rest)))))

(defun pcplus (coeff poly)
  ;; Add the constant COEFF to the polynomial POLY.  When POLY is a
  ;; bare coefficient, add directly; otherwise fold COEFF into POLY's
  ;; term list and re-simplify.
  (cond ((pcoefp poly) (cplus poly coeff))
        (t (psimp (car poly) (pcplus1 coeff (cdr poly))))))

(defun pcplus1 (coeff terms)
  ;; Walk the (exponent coefficient ...) list TERMS, adding the
  ;; constant COEFF into its exponent-0 term (creating one if the
  ;; list runs out without finding it).
  (cond ((null terms)
         (cond ((pzerop coeff) nil)
               (t (cons 0 (cons coeff nil)))))
        ((pzerop (car terms))
         (pcoefadd 0 (pplus coeff (cadr terms)) nil))
        (t (cons (car terms)
                 (cons (cadr terms)
                       (pcplus1 coeff (cddr terms)))))))
	 
(defun pctimes (coeff poly)
  ;; Multiply the polynomial POLY by the constant COEFF.
  (cond ((pcoefp poly) (ctimes coeff poly))
        (t (psimp (car poly) (pctimes1 coeff (cdr poly))))))

(defun pctimes1 (coeff terms)
  ;; Scale every coefficient in the term list TERMS by COEFF,
  ;; dropping any term whose product comes out zero.
  (cond ((null terms) nil)
        (t (pcoefadd (car terms)
                     (ptimes coeff (cadr terms))
                     (pctimes1 coeff (cddr terms))))))

(defun pplus (p q)
  ;; Add two polynomials in recursive canonical form.  Main variables
  ;; are compared with POINTERGP so the sum is expressed in whichever
  ;; variable dominates.
  (cond ((pcoefp p) (pcplus p q))
        ((pcoefp q) (pcplus q p))
        ((eq (car p) (car q))
         (psimp (car p) (pplus1 (cdr q) (cdr p))))
        ((pointergp (car p) (car q))
         (psimp (car p) (pcplus1 q (cdr p))))
        (t (psimp (car q) (pcplus1 p (cdr q))))))

(defun pplus1 (ta tb)
  ;; Merge two term lists sorted by decreasing exponent, adding the
  ;; coefficients of terms whose exponents match.
  (cond ((null ta) tb)
        ((null tb) ta)
        ((= (car ta) (car tb))
         (pcoefadd (car ta)
                   (pplus (cadr ta) (cadr tb))
                   (pplus1 (cddr ta) (cddr tb))))
        ((> (car ta) (car tb))
         (cons (car ta)
               (cons (cadr ta) (pplus1 (cddr ta) tb))))
        (t (cons (car tb)
                 (cons (cadr tb) (pplus1 ta (cddr tb)))))))

(defun psimp (var terms)
  ;; Rebuild a polynomial in VAR from term list TERMS, collapsing the
  ;; degenerate cases: no terms => 0, an atomic result => itself, a
  ;; lone constant (exponent-0) term => the bare coefficient.
  (cond ((null terms) 0)
        ((atom terms) terms)
        ((zerop (car terms)) (cadr terms))
        (t (cons var terms))))

(defun ptimes (p q)
  ;; Multiply two polynomials in recursive canonical form.  A zero
  ;; operand short-circuits to 0; bare coefficients distribute via
  ;; PCTIMES; otherwise the term lists are multiplied in whichever
  ;; main variable dominates (per POINTERGP).
  (cond ((or (pzerop p) (pzerop q)) (pzero))
        ((pcoefp p) (pctimes p q))
        ((pcoefp q) (pctimes q p))
        ((eq (car p) (car q))
         (psimp (car p) (ptimes1 (cdr p) (cdr q))))
        ((pointergp (car p) (car q))
         (psimp (car p) (pctimes1 q (cdr p))))
        (t (psimp (car q) (pctimes1 p (cdr q))))))

;; Multiply two term lists.  *X*, U*, and V are all declared SPECIAL
;; above: PTIMES2 reads *X* freely and PTIMES3 reads and updates U*
;; and V, so none of these names may be renamed in isolation.  The
;; first pass through PTIMES2 seeds the accumulator U*; each further
;; coefficient of *X* is merged in destructively by PTIMES3.
(defun ptimes1 (*x* y) (prog (u* v)
			       (setq v (setq u* (ptimes2 y)))
			  a    (setq *x* (cddr *x*))
			       (cond ((null *x*) (return u*)))
			       (ptimes3 y)
			       (go a)))

;; Multiply the current leading term of *X* (a special stepped by
;; PTIMES1) through the term list Y, consing a fresh term list.
(defun ptimes2 (y) (cond ((null y) nil)
			 (t (pcoefadd (plus (car *x*) (car y))
				      (ptimes (cadr *x*) (cadr y))
				      (ptimes2 (cddr y))))))

;; Destructively merge the products of *X*'s current term with term
;; list Y into the accumulator U*.  Communicates through the specials
;; *X*, U*, and V (bound in PTIMES1); V tracks a position in U* so
;; successive merges need not rescan from the front.  The list is
;; edited in place with RPLACA/RPLACD, so statement order matters.
;; NOTE(review): the PZEROP branch at label A1 calls PDIFFER1, which
;; is mentioned but not defined in this file; per the covering
;; message that branch is never reached in these tests.
(defun ptimes3 (y) 
  (prog (e u c) 
     a1 (cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))	; exponent of the product term
	(setq c (ptimes (cadr y) (cadr *x*) ))	; its coefficient
	(cond ((pzerop c) (setq y (cddr y)) (go a1))
	      ((or (null v) (> e (car v)))
	       ;; new term belongs in front of V: nondestructive merge
	       (setq u* (setq v (pplus1 u* (list e c))))
	       (setq y (cddr y)) (go a1))
	      ((= e (car v))
	       ;; exponent matches V's term: add coefficients in place
	       (setq c (pplus c (cadr v)))
	       (cond ((pzerop c) (setq u* (setq v (pdiffer1 u* (list (car v) (cadr v))))))
		     (t (rplaca (cdr v) c)))
	       (setq y (cddr y))
	       (go a1)))
     a  (cond ((and (cddr v) (> (caddr v) e)) (setq v (cddr v)) (go a)))
	(setq u (cdr v))
     b  (cond ((or (null (cdr u)) (< (cadr u) e))
	       ;; splice a brand-new term in after U
	       (rplacd u (cons e (cons c (cdr u)))) (go e)))
	(cond ((pzerop (setq c (pplus (caddr u) c))) (rplacd u (cdddr u)) (go d))
	      (t (rplaca (cddr u) c)))
     e  (setq u (cddr u))
     d  (setq y (cddr y))
	(cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))
	(setq c (ptimes (cadr y) (cadr *x*)))
     c  (cond ((and (cdr u) (> (cadr u) e)) (setq u (cddr u)) (go c)))
	(go b))) 

;; Raise polynomial P to the power N by repeated squaring.  The DO
;; init forms bind in parallel: N is halved before the first test,
;; while S is seeded from the *original* N — an odd power starts the
;; accumulator at P, an even power at 1.
(defun pexptsq (p n)
	(do ((n (quotient n 2) (quotient n 2))
	     (s (cond ((oddp n) p) (t 1))))
	    ((zerop n) s)
	    (setq p (ptimes p p))			; square
	    (and (oddp n) (setq s (ptimes s p))) ))	; fold in odd bits

;; Build the three test polynomials in the specials R, R2, and R3,
;; and establish the variable ordering (ORDER properties consulted
;; by the POINTERGP macro): z > y > x.
(defun setup nil
  (putprop 'x 1 'order)
  (putprop 'y 2 'order)
  (putprop 'z 3 'order)
  (setq r (pplus '(x 1 1 0 1) (pplus '(y 1 1) '(z 1 1)))) ; r= x+y+z+1
  (setq r2 (ptimes r 100000)) ;r2 = 100000*r, drives the bignum case
  (setq r3 (ptimes r 1.0)); r3 = r with floating point coefficients
  )
; time various computations of powers of polynomials, not counting
;printing but including gc time ; provide account of g.c. time.

; The following function uses (ptime) for process-time and is thus
;  Franz-specific.

(defmacro ptime () '`(,(runtime) ,(status gctime)))

;; Time (pexptsq r n), (pexptsq r2 n), and (pexptsq r3 n), recording
;; a (runtime gctime) snapshot (specials START, RES1..RES3) around
;; each, and report the three elapsed (runtime gctime) pairs via B1.
(defun bench (n)
  (setq start (ptime)) ;  Franz ticks, 60 per sec, 2nd number is GC
  (pexptsq r n) 
  (setq res1 (ptime))
  (pexptsq r2 n)
  (setq res2 (ptime))
  ; this one requires bignums.
  (pexptsq r3 n)
  (setq res3 (ptime))
  (list 'power=  n (b1 start res1)(b1 res1 res2)(b1 res2 res3)))
(defun b1(x y)(mapcar '(lambda(r s)(quotient (float (- s r)) 1000000.0)) x y))

;instructions:
;  after loading, type (setup)
; then (bench 2) ; this should be pretty fast.
; then (bench 5)
; then (bench 10)
; then (bench 15)
;... 

∂23-Apr-81  1245	RPG  	Franz benchmark    
To:   lisptranslators at SU-AI   
The FRANZ benchmark is to be referred to as: FRPOLY.
			-rpg-

∂24-Apr-81  1324	Bernard S. Greenberg       <Greenberg at MIT-Multics> 	Re: FRANZ Benchmark, Multics Numbers    
Redistributed-Date:  24 April 1981 16:24 est
Redistributed-By:  Greenberg.Symbolics at MIT-Multics
Redistributed-To:  lisptranslators at SU-AI, rpg at SU-AI
Date:     23 April 1981 2059-est
From:     Bernard S. Greenberg       <Greenberg at MIT-Multics>
Subject:  Re: FRANZ Benchmark, Multics Numbers
To:       RPG at SU-AI
Cc:       lisptranslators at SU-AI

Here they are, and not bad at all.  Bear in mind that Multics Lisp
represents all fixna and flona as immediate, thus has no PDL
numbers, pdlnmks, number consing, etc.  Bigna are allocated
contiguously....  Code was compiled, using installed system
backquote and an adhoc defmacro definition. Wasn't clear if
RPG's input numbers were decimal or octal, so I did it both ways:

lisp
*
(load 'gab2)
t
(setup)
(z 1 1.0 0 (y 1 1.0 0 (x 1 1.0 0 1.0)))
(bench 2)
(power= 2 (0.016692 0.0) (0.015114 0.0) (0.015725 0.0))
(bench 5)
(power= 5 (0.150491 0.0) (0.212428 0.0) (0.154568 0.0))
(bench 10)  ;=8
(power= 10 (0.968238 0.184816) (1.71576 0.389726) (0.99761099 0.187837))
(bench 10.) ;decimal
(power= 12 (2.000796 0.405341) (3.569996 0.880229) (1.883108 0.231459))
(bench 15) ;octal = 13.
(power= 15 (6.563067 1.148998) (13.168704 2.515469) (6.694873 1.155386))
(bench 15.) ;decimal
(power= 17 (12.532608 1.85896) (27.568518 5.391129) (12.636826 1.860995))
(quit)

hmu

Multics 35.0a, load 42.0/120.0; 42 users, 27 interactive, 12 daemons.
Absentee users 1/3 (+2 FG)

∂24-Apr-81  1414	RPG  	Errata   
To:   lisptranslators at SU-AI   
In the FRPOLY benchmarks, the calls should be:
	(SETUP)	
	(BENCH 2.)
	(BENCH 5.)
	(BENCH 10.)
	(BENCH 15.)
			-rpg-

∂24-Apr-81  1608	CSVAX.jkf at Berkeley 	octal vrs decimal
Date: 24 Apr 1981 15:52:49-PST
From: CSVAX.jkf at Berkeley
To: lisptranslators@su-ai
Subject: octal vrs decimal

 I can't see any reason for using octal for input or output in any benchmark.
Why don't we just make it a rule that all numbers are decimal to reduce
confusion in the future.

∂25-Apr-81  1242	Greenberg.Symbolics at MIT-Multics 	Re: octal vrs decimal   
Date:  25 April 1981 15:39 est
From:  Greenberg.Symbolics at MIT-Multics
Subject:  Re: octal vrs decimal
To:  CSVAX.jkf at BERKELEY
cc:  lisptranslators at SU-AI
In-Reply-To:  Message of 24 April 1981 18:52 est from CSVAX.jkf

Why don't we put trailing dots on numbers so no one has a chance to blow it?

∂25-Apr-81  1320	Vanmelle at SUMEX-AIM 	Re:  Re: octal vrs decimal 
Date: 25 Apr 1981 1315-PST
From: Vanmelle at SUMEX-AIM
Subject: Re:  Re: octal vrs decimal
To:   Greenberg.Symbolics at MIT-MULTICS, CSVAX.jkf at BERKELEY
cc:   lisptranslators at SU-AI

 In response to the message sent 25 Apr 1981 1243-PST by 

I'm with jkf.  Lisp is not assembly language; why should anyone have
to qualify decimal integers?  Adding decimal points could lead to
additional confusion--in at least one dialect (Interlisp), decimal
points imply floating-point.
-------

∂25-Apr-81  1727	Greenberg.Symbolics at MIT-Multics 	Re:  Re: octal vrs decimal   
Date:  25 April 1981 20:25 est
From:  Greenberg.Symbolics at MIT-Multics
Subject:  Re:  Re: octal vrs decimal
To:  Vanmelle at SUMEX-AIM
cc:  CSVAX.jkf at BERKELEY, lisptranslators at SU-AI
In-Reply-To:  Message of 25 April 1981 16:15 est from Vanmelle

The issue here is not language design or what
Lisp oughtta be; the issue is minimizing confusion in
translation between dialects and making the rules clear.

∂25-Apr-81  2210	CSVAX.jkf at Berkeley 	Re:  Re: octal vrs decimal 
Date: 25 Apr 1981 21:34:31-PST
From: CSVAX.jkf at Berkeley
To: Greenberg.Symbolics@MIT-Multics
cc: lisptranslators at SU-AI, Vanmelle at SUMEX-AIM
Subject: Re:  Re: octal vrs decimal
In-reply-to: Your message of 25 Apr 1981 1725-PST (Saturday).

    Date: 25 Apr 1981 1725-PST (Saturday)
    From: Greenberg.Symbolics@MIT-Multics
    Subject:  Re:  Re: octal vrs decimal
    To:  Vanmelle at SUMEX-AIM
    cc:  CSVAX.jkf at BERKELEY, lisptranslators at SU-AI
    In-Reply-To:  Message of 25 April 1981 16:15 est from Vanmelle

    The issue here is not language design or what
    Lisp oughtta be; the issue is minimizing confusion in
    translation between dialects and making the rules clear.

Right,  we should make the conversion between dialects as easy as possible,
and that is why I suggest that we make decimal the default.  I don't know
of any dialect which can't understand decimal.  Our lisp system (Franz)
at least only handles octal on input not on output (since we've never found
any need for it).

∂24-Apr-81  1411	CSVAX.jkf at Berkeley 	franz timing results  
Date: 24 Apr 1981 13:22:17-PST
From: CSVAX.jkf at Berkeley
To: rpg@su-ai
Subject: franz timing results
Cc: CSVAX.fateman@Berkeley, CSVAX.jkf@Berkeley

							frpoly benchmark 
							submitted by j foderaro
							(csvax.jkf@berkeley)
							24 april 81

Here are the results of running the FRPOLY benchmark on Franz Lisp on 
a VAX 11/780 running Berkeley 4BSD Unix.  The load average was less
than one when the timing was done.  These results supersede the timings
you have which were made by us in March.



Script started on Fri Apr 24 06:04:12 1981
Reval: lisp
Franz Lisp, Opus 34
-> (load 'frpoly)
[fasl frpoly.o]
t
-> (setup)
(z 1 1.0 0 (y 1 1.0 0 (x 1 1.0 0 1.0)))
-> (bench 2)
(power= 2 (0.0167 0.0) (0.0333 0.0) (0.0 0.0))
-> (bench 5)
(power= 5 (0.15 0.0) (0.75 0.4333) (0.3833 0.2166))
-> (bench 10)
(power= 10 (2.8167 1.05) (8.2333 3.3) (3.2 1.2333))
-> (bench 15)
(power= 15 (18.2333 5.35) (92.0333 41.6333) (18.8 5.1333))
-> 
script done on Fri Apr 24 06:07:48 1981

∂28-Apr-81  1122	Vanmelle at SUMEX-AIM 	Re: Benchmarks        
Date: 28 Apr 1981 1118-PDT
From: Vanmelle at SUMEX-AIM
Subject: Re: Benchmarks    
To:   RPG at SU-AI

 In response to your message sent 28 Apr 1981 0625-PDT

Sure.  I can run them both on a 2020 (Sumex2020) and a 2060 (Score).
Has someone already done the translation?

	Bill
-------

I don't think so. The only tricky parts are the more-than-5-args problem
and catch/throw. I've faked catch and throw in InterLisp with the spaghetti
stuff. Let me try to locate what I did and show you. You may luck out because
at one point I had a MacLisp=>InterLisp translator that I used. I'll also
try to dig that one up and see what it does to the benchmark.
			-rpg-
∂28-Apr-81  2115	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: FRANZ Benchmark        
Date: 28 Apr 1981 2310-CDT
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: FRANZ Benchmark    
To: RPG at SU-AI
In-Reply-To: Your message of 23-Apr-81 1432-CST


    Mail from SU-AI rcvd at 24-Apr-81 0002-CST
    Date: 23 Apr 1981 1232-PST
    From: Dick Gabriel <RPG at SU-AI>
    Subject: FRANZ Benchmark    
    To:   lisptranslators at SU-AI   

      .......

    Here's my transcript of SAIL MacLisp:

    (setup)
    (Z 1 1.0 0 (Y 1 1.0 0 (X 1 1.0 0 1.0))) 
    (bench 2)
    (POWER= 2 (0.017 0.0) (0.017 0.0) (0.016 0.0)) 
    (bench 5)
    (POWER= 5 (0.116 0.0) (1.334 1.084) (0.15 0.0)) 
    (bench 10)
    (POWER= 10 (2.534 1.8) (19.733 17.151) (8.983 7.901)) 
    (bench 15)
    (POWER= 15 (16.65 8.832) (112.516 89.298) (63.9 56.749)) 

    Which I ran compiled. Times are in seconds.
	.......


I have a question about the results you show.  The problem I see is that
the difference between the first result of (BENCH 15) and the third
result of (BENCH 15).  If I am not wrong, the runtimes after subtracting
out the (rather large) GC times is 7.818 vs 7.151.  The difference in
the two examples is that the first one uses integer coefficients while the
other uses the real number version of the same integer.  That indicates
to me that your machine does real-number multiplication faster than it
does integer multiplication???   Note that the results for the smaller
benchmarks give the expected result that the integer multiplication is
faster than real number multiplication.

I am curious about this.  Are you sure of your results?  Do you get the
same results if you increase your free space (or whatever) so that the
amount of GC decreases to something much less than 80% of your total
time.  If the real number version continues to run faster, I would be
very interested in why it is faster.
-------

FRPOLY at SAIL
The transcript was made blindly to give some rough idea of the runtime.
I'll run the entire thing again today and send out the real results.
			-rpg-
∂02-May-81  1245	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	Re: FRANZ Benchmark        
Date:  2 May 1981 1437-CDT
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: Re: FRANZ Benchmark    
To: RPG at SU-AI
In-Reply-To: Your message of 23-Apr-81 1432-CST

Here are the results of the FRPOLY benchmark.   I did notice that compilation
of this benchmark resulted in more of a speedup than it did on the
SCCPP benchmark.

My comment in the notes at the end mentions that UCI-Lisp doesn't have a
BIGNUM package.  I don't know that for sure.  The assembly language source
has a number of hooks for a BIGNUM package but I don't know where that package
is.

		Results of LISP timing tests for UCI-Lisp

(Times are in the form R+G where R is the runtime (not including GC time)
and G is the GC time.  All times are in seconds.  "Interp" means interpreted,
"Slow" means compiled but using UUO-links, "Fast" means compiled with the UUO
links replaced by direct jumps.)


				   Processor
Program			KL-2060			KI-1060
	   Interp	Slow	  Fast	     Interp	  Slow		Fast

FRPOLY:
 Free:			 100000.		75000.
(bench	 0.719+0     0.142+0	0.043+0	     2.627+0	 0.576+0     0.181+0
  2)	 0.677+0     0.142+0	0.047+0	     2.619+0	 0.545+0     0.168+0
First	 0.677+0     0.141+0	0.042+0	     2.698+0	 0.580+0     0.166+0
 result	 0.687+0     0.140+0	0.043+0			 0.612+0     0.155+0
	----------------------------------------------------------------------
Third	 0.706+0     0.162+0	0.063+0	     2.585+0	 0.630+0     0.256+0
 result	 0.830+0     0.164+0	0.063+0	     2.798+0	 0.610+0     0.227+0
	 0.702+0     0.162+0	0.062+0	     2.733+0	 0.695+0     0.252+0
	 0.700+0     0.162+0	0.065+0			 0.593+0     0.215+0
	======================================================================
(bench	 5.88+0      1.166+0	0.343+0	    22.25+0	 4.384+0     1.451+0
  5)	 5.696+0     1.142+0	0.355+0     21.87+0	 4.462+0     1.297+0
First	 5.706       1.146+0	0.338+0			 4.719+0     1.500+0
 result		     1.18+0	0.351+0
	----------------------------------------------------------------------
Third	 5.891+0     1.343+0	0.523+0	    23.04+0	 4.964+0     2.097+0
 result	 5.880+0     1.383+0	0.51+0	    21.64+0	 5.084+0     2.065+0
	 5.884+0     1.345+0	0.522+0			 5.093+0     2.048+0
		     1.341+0	0.514+0
	======================================================================
(bench	122.2+1.1   25.48+1.02  8.63+1.04	--          --	    31.91+2.12
  10)		    25.14+0.98	8.42+1.02
First		    25.53+1.03	8.47+1.01
	----------------------------------------------------------------------
Third	126.4+2.2   28.17+2.02 11.57+2.04	--	    --	    39.07+6.07
 result		    28.26+2.03 11.54+2.04
		    28.18+2.04 11.28+1.98
	======================================================================
(bench	  --	    39.22+2.16 12.59+2.00	--	    --		--
  15)			       12.98+2.06
First
	----------------------------------------------------------------------
Third	  --	    43.46+3.08 17.22+3.02	--	    --		--
 result			       17.58+3.05
	======================================================================

Note:  The results referred to as the first result is the result obtained
as the first value returned by BENCH.  This is the value computed using
integer coefficients.  The result referred to as the third result is the
third value returned by BENCH (for real number coefficients).  UCI Lisp
does not have the bignum package so it could not compute the second
result returned by the BENCH routine.

-------

∂04-May-81  1326	correira at UTEXAS-11  	UTLISP benchmarks    
Date:  4 May 1981 at 1516-CDT
From: correira at UTEXAS-11 
Subject: UTLISP benchmarks
To: rpg at su-ai
cc: correira

Hi! I have been very busy of late so I was only able to get to the
benchmarks late last week.  I have three files to send and I would
like to know whether I should send them directly to you or to
LISPTIMINGS:

1) an overview of UTLISP 5.1 and the OS I am running the benchmarks
under (UT-2D),

2) the timings for SCCPP,

3) the timings for FRPOLY.

Sorry it took so long to get around to it.

					Sincerely,
					Alfred Correira
-------

Normally they are sent to LISPTRANSLATORS, which has the effect
of goading other translators into doing their jobs!
			-rpg-
∂05-May-81  0643	correira at UTEXAS-11  	SCCPP Timings for UTLISP  
Date:  5 May 1981 at 0835-CDT
From: correira at UTEXAS-11 
Subject: SCCPP Timings for UTLISP
To: lisptranslators at su-ai

Following are the timings for SCCPP for UTLISP 5.1. Because of local
interactive field length (= core size) restrictions, all runs were
submitted as batch jobs. "runtime" does not include "gctime".


Interpreted:

   run#:       #1        #2        #3        #4        #5

runtime:     18.15     18.17     18.25     18.22     18.26
 gctime:      1.34      1.37      1.34      1.35      1.34

Run at 200000 (octal) field length with: 45300. words of free space
                                          3030. words of full space
There were three garbage collections for each run.


Compiled:

   run#:       #1        #2        #3        #4        #5

runtime:      3.95      4.09      4.06      4.10      4.03
 gctime:       .97       .82       .84       .82       .83

Run at 20000 (octal) field length with: 45524. words of free space
                                         2957. words of full space
There were three garbage collections for each run.
-------

∂05-May-81  0643	correira at UTEXAS-11  	FRPOLY Timings for UTLISP 
Date:  5 May 1981 at 0836-CDT
From: correira at UTEXAS-11 
Subject: FRPOLY Timings for UTLISP
To: lisptranslators at su-ai

Following are the results for FRPOLY under UTLISP 5.1.  The runs at
75000 (octal) were run interactively; the remainder were submitted
as batch jobs. "runtime" does NOT include "gctime".

Interpreted:

bench 2: (runtime+gctime)

R:     1.168+0     1.168+0     1.149+0     1.147+0
R2:    1.181+0     1.162+0     1.171+0     1.174+0
R3:    1.175+0     1.170+0     1.171+0     1.179+0

bench 5: (runtime over gctime)

R:     9.910       9.917       9.868       9.904
        .156        .145        .152        .148
R2:    5.237       5.261       5.245       5.237
        .162        .156        .157        .156
R3:    9.930       9.899       9.960       9.927
        .323        .315        .311        .300

bench 10:

R:   213.160
       4.205
R2:    7.136
        .168
R3:  213.650
       3.994

bench 2 and bench 5 were run at a field length (= core size) of
75000 (octal) words with:  free space of 7500. words
                           full space of 2583. words
bench 2 required no garbage collections; bench 5 required 4 garbage
collections for each run.

bench 10 was run at a field length of 200000 (octal) words with:
                           free space of 41706. words
                           full space of 6685. words
bench 10 required 38 garbage collections.
For obvious reasons, I did not run bench 15.


Compiled:

bench 2:

R:      .173        .139        .153        .149
       0.          0.           .108       0.
R2:     .165        .167        .156        .150
       0.           .115       0.          0.
R3:     .155        .154        .165        .183
       0.          0.          0.          0.

bench 5:

R:     1.406       1.361       1.353       1.327
        .328        .356        .366        .385
R2:     .897        .872        .857        .861
        .159        .253        .257        .260
R3:    1.430       1.372       1.382       1.375
        .325        .395        .396        .269

bench 10:

R:    30.043      30.009      30.016
       3.989       3.866       4.010
R2:    1.219       1.172       1.218
        .143        .179        .155
R3:   30.495      30.509      30.528
       4.015       3.896       3.916

bench 15:

R:    46.046      46.030
       7.346       7.330
R2:    2.120       2.122
        .171        .174
R3:   46.945      46.736
       7.077       7.264

bench 2 and bench 5 were run at a field length of 75000 (octal)
words with:  free space of 14177. words
             full space of 1554. words
bench 2 required 0 or 1 garbage collections per run; bench 5
required 7 to 8 garbage collections per run.

bench 10 and bench 15 were run at a field length of 200000 (octal)
words with:  free space of 42913. words
             full space of 6859. words
bench 10 required 37 garbage collections per run; bench 15 required
63 garbage collections.

As you can see from the R2 results, there are times when a 60 bit word
size can come in handy.
-------

∂05-May-81  0643	correira at UTEXAS-11  	A thumbnail sketch of UTLISP   
Date:  5 May 1981 at 0821-CDT
From: correira at UTEXAS-11 
Subject: A thumbnail sketch of UTLISP
To: lisptranslators at su-ai

Below is a short description of UTLISP and the OS I am running the
benchmarks under, provided for those of you (which is probably just
about everybody) who either have never seen this LISP dialect or
whose only exposure has consisted of denigratory comments scribbled
on bathroom walls.

Alfred Correira

-------------------------------------------------------------------

UTLISP is a LISP dialect for Control Data Corporation 6000, Cyber
70, and Cyber 170 class machines. It is currently running at over
50 sites at some level (from  UTLISP3  to  UTLISP5.1)  and  under
various  operating  systems  (NOS,  NOS/BE,  KRONOS,  SCOPE,  and
several home-brews, including our local UT-2D).  UTLISP is  based
on  Blue  Book  Lisp; the first crude implementation goes back to
1967 as a project sponsored by W. W.  Bledsoe.   Extensive  revi-
sions  were  made  in  the  late  60s  and early 70s by Dr. E. M.
Greenawalt, and by Bob Amsler, Jonathan Slocum, and Mabry  Tyson,
culminating  in  Version  4.  Version 5 was the product of my ef-
forts in 1979-1980.  The current system consists  of  the  inter-
preter,  a compiler, LAP, and runtime support utilities.  Most of
UTLISP is written in COMPASS (the assembly language  of  the  CDC
mainframes)  although  much of the runtime support stuff released
with Version 5 is coded in LISP.  The compiler is  an  old,  can-
tankerous,  and obscure master's project from around 1971 that is
not frequently used - most people who use UTLISP  depend  on  the
speed  of the underlying hardware to make up for this.  UTLISP is
not used much for system-building; Bledsoe's theorem  prover  was
about  the last big project written in UTLISP that I know of, and
it was converted to UCILISP years ago due to the primitive nature
of  UTLISP at that time and the memory constraints imposed by our
OS.

Atoms in UTLISP are 60 bit quantities, including 3-18 bit  fields
(CAR,  CDR,  CSR)  plus flag bits.  The CAR usually points at the
atom itself or to previous values of the atom  (i.e.  values  are
maintained  as  stacks  on the atoms themselves rather than being
pushed onto the runtime stack). The CDR field usually  points  at
the  current  value  of the atom, and the CSR field points at the
atom's property list.   Thus  UTLISP  uses  shallow  binding  for
storing/retrieving  atom  values.  There are no bignums (although
these are not usually necessary anyway with a 60 bit  word  size)
or  smallnum  (inums,  etc.  -  i.e.  no  encoding  of numbers in
pointer fields). The garbage collection scheme is mark and sweep,
the mark phase is based on Algorithm E in Chapter 2 of Knuth.

UTLISP 5.1 has most of the  traditional  accoutrements  (property
lists,  arrays,  funargs  that  work,  macros, read/print macros,
etc.), plus a  reader  with  flexible  lexical  classes,  virtual
memory  that allows automatic retrieval of code from disk for in-
terpreted functions, overlays, interrupts, etc.  It also has  the
essential  runtime  supports:  editor, BREAK, a MAKEFILE package,
Help,  dynamic  memory  support,  etc.  The  editor,  BREAK,  and
MAKEFILE  (DSKOUT)  packages  use  the  corresponding features in
UCILISP as models (in particular, the UTLISP editor is a slightly
scaled-down  version of the UCILISP editor).  UTLISP lacks: abort
error recovery, ASCII (except for the Help system), strings,  ef-
ficient  vectors, a binary loader for compiled code, LEXPRS, etc.
UTLISP does provide extensive error-checking for the arguments of
system functions, and random and binary I/O.

I will mention only the OS that I am running the benchmarks on  -
UT-2D.   UT-2D  is  a  home-grown  OS  that  runs on a Dual Cyber
170/750 system.  Each Cyber has 256k of  central  memory  and  20
peripheral   processors.    They   communicate  through  492k  of
extended-core storage. There are 3 844-4 disk systems (957  mega-
characters  each)  and  2  885  disk systems (2768 megacharacters
each).  Each CPU has a 400 nanosecond cycle time with  a  maximum
transfer  rate  of one word each 50 nanoseconds; memory is organ-
ized into eight independent banks.  Some  disk  numbers:  average
seek  time  of  30  msec. for the 844s and 25 msec. for the 885s,
transfer  rate  of  6.45  megabits/sec  for  the  844s  and  9.58
megabits/sec.  for  the  885s.   UT-2D  itself is non-paged, non-
virtual anything.  Programs are restricted to 128k batch and  32k
interactive presently although the interactive memory size is due
to increase to 128k this summer with the installation of some new
semiconductor memory replacing the current extended-core storage.
The system supports about 170 conversational users  through  MOD-
COMP  III  and  Interdata 8/32 front-ends plus a full batch load.
One Cyber is usually dedicated to running batch jobs and the oth-
er runs conversational jobs.

As hardware for UTLISP goes, the Cyber 170/750  is  not  terribly
friendly.  There is no hardware support for recursion or environ-
ment manipulation.  There are no machine instructions for direct-
ly   accessing   the   fields   of   a   LISP   atom.    The  CPU
pipeline/instruction  stack is large and  slow  to  clear  during
jumps, which UTLISP does frequently and with mad abandon.

UTLISP is available to all who want it.  The  user  community  is
divided  primarily  into  classroom use to teach LISP/AI concepts
and a fair number of sites wanting UTLISP in order to run  the  U
of Utah's REDUCE system.



-------

∂26-May-81  0916	George J. Carrette <GJC at MIT-MC> 	benchmark.    
Date: 26 May 1981 12:16-EDT
From: George J. Carrette <GJC at MIT-MC>
Subject: benchmark.
To: RPG at MIT-MC

Files: "MC:LIBDOC;BENCH >"
       "MC:SCHEME;SCAM >"
       "MC:SCHEME;CHURCH >"

Test on MC is in "MC:SCHEME;SCAM KL10"

I'll have a LISPM and NIL test soon too.


∂09-Aug-81  1912	RPG   via CMU-20C 	Vacation   
To:   lisptiming at SU-AI   
In case some of you are wondering what is up with the Lisp timing
project, it is temporarily on vacation while I am at CMU working on S-1 Lisp.
In the fall, arpa is partially funding the effort and I will have a grad
student to translate many of the benchmarks for the various sites.
See you in the fall.
			-rpg-

∂20-Oct-81  1527	LYNCH at USC-ISIB 	Benchmarks for Interlisp-VAX   
Date: 20 Oct 1981 1522-PDT
From: LYNCH at USC-ISIB
Subject: Benchmarks for Interlisp-VAX
To:   Rindfleisch at AIM, Feigenbaum at AIM, RPG at SAIL,
To:   Pratt at AIM, Masinter at PARC, Balzer at ISIF,
To:   csvax.fateman at BERKELEY, CBF at MIT-MC, vanmelle at AIM,
To:   Schoen at AIM, CSD.Novak at SCORE
cc:   DDyer, RBates, Voreck, Saunders, Lynch

Isn't there a quote somewhere about "lies, damned lies and statistics"?
Benchmarks of complicated programming environments like Lisp are 
probably in an even less illustrious position.  We took
the TAK function and ran it on our Interlisp-VAX for the 11/780 we 
have here at ISI.  Dave Dyer just typed in the little program and
compiled it and ran it and it took 10.5 seconds of CPU time
to run for the case TAK(18. 12. 6.).  He then replaced GREATERP by
IGREATERP and it took 5.0 seconds.  That is what it did today.
We are in the process of doing a complete rewrite of the compiler
(the existing one was done in a hurry with the aim of getting something
up quick that is "correct") and expect some gains in execution 
speed to be made when it is done.  CAn we tune it to make TAK go
much faster?  Sure!  But we probably won't make that specific
attempt.  What matters to programmers?  Speed or habitability?
The answer is of course: both.  ISI has aimed for habitability
and completeness first.  WE are about there.  Now we go for speed.
That will take a long time as all system developers know.
For Interlisp on the VAX there is the issue of having it
behave "well" in the timesharing environment as opposed
to taking over the whole machine in a single user environment.
At this point we have ignored that issue (assumed the single user
environment) and expect that the loudest cries from new users
will come from their brethren who are unlucky enough to be
on the same machine with them at the same time.  Not at
all unlike the current situation with PDP-10s, eh?

Does anyone wish to nominate a set
of meaningful benchmarks that most of us can code up
and run?  Or will they each generate more questions than 
answers?

Dan 
-------

∂20-Oct-81  1614	Doug Lenat <CSD.LENAT at SU-SCORE> 	Save the Dolphins  
Date: 20 Oct 1981 1613-PDT
From: Doug Lenat <CSD.LENAT at SU-SCORE>
Subject: Save the Dolphins
To: pratt at SU-HPP-VAX, balzer at USC-ISI, masinter at PARC-MAXC,
    lynch at USC-ISIB, feigenbaum at SUMEX-AIM, rindfleisch at SUMEX-AIM,
    rpg at SU-AI, csd.novak at SU-SCORE
cc: csd.lenat at SU-SCORE

After carefully isolating myself from the Dolphin versus X  controversy
up until now, I feel I must at least send a brief note on behalf of the
cetaceans.  

First of all, what is this business of comparing "reported cpu time" on
large machines with elapsed time on small ones?  What happened to
time for garbage collection, changing virtual images, etc. on the big
machines?

Second of all, where do I go to buy time on a 2060 with load avg of 1?
Most of the big machines I know crowd up to the point where they are
just barely not usable for ANY Interlisp jobs during the day.

Third of all, where do you spend your typical daytime moments
when coding?  95% of my time goes into editing, browsing through
old functions, debugging my code on tiny-cpu-time-usage calls on
EVAL (usually from within the editor, which is something
MACLISP lacks, of course).  For all of these activities, the
Dolphin has proven itself to me (over the past year) to be
at least as good as an unloaded 2060 (yes, I can find them at night),
and in many ways superior.  Superiority comes from tiny places
(big screen, plus decent window packages, mean less groping and
redisplaying), from the complete absence of timesharing (I never
see a delay during editing on a Dolphin, but I really do notice
one when I go back and use a 2060, even with load avg in the 1-2 range),
and from the predictability of the response of the machine (I never
have to come in and bemoan the fact that the system is down, or
that it is crowded much more/less than I expected, etc.)

Fourth of all, one can get to know what things take a long time
on Dolphins (making and loading files, running interpreted code)
and minimize doing them.  One approach is to type CLEANUP) just
before going home each night.  I have left my Dolphin running
for several days and nights straight, doing computations, with no
worry about the machine crashing on me.  (At least, I wasn't
concerned UNTIL Larry Masinter was impressed that I live like that!)
Anyway, the only runs where one cares overmuch about cpu time
can be done on compiled code.  Gordon Novack told me that his
run was done on interpreted code. The fraction
interp-running-time/compiled-running-time is much larger for
Dolphins than for other machines, so it is not surprising that
a large interpreted program languishes on the Dolphin.

Fifth, I have been amazed at the responsiveness of Masinter, Sheil,
and the other Interlisp-D group folk, when faced with
complaints.  They have built up a very high credibility with me
by now, and I take their predictions about future improvements
quite seriously.  The Dolphin has grown much better over the year
or so I've used it.  The slowness of the interpreter is one of
the problems they intend to correct -- and the slow dealings with
files should be ameliorated as well.

Sixth, what is this fuss about CONSing?  I don't get paid by the
size of the data structures I produce, but with the appropriateness
of their contents.  Most of my time is spent doing
GET, PUT, CAR, CDR, and logical operations (including COND).
Maybe only 2-4 per cent of my time is spent in CONSing.
The complaint about slow function calling is valid, and is another
item near the top of the Dolphin group's stack.

Seventh is machine reliability.  it is.  As I said, I use it
for periods of days at a time between sysouts with
little worry.  Mine has never broken down, nor has anyone ever
commented to me that theirs has.  Bugs in Interlisp-D itself
were common a year ago, a weekly occurrence six months ago,
and virtually nonexistent now.  There is a bad phenomenon
called sandbarring that occasionally slows the printing down
to a crawl, and that is probably one of the chief culprits
in Novack's results.  This occurs during the running of interpreted
code, and is at the top of the Dolphin lisp group's stack to fix.

Eighth and finally, I spend most of my programming time on
Dolphins.  I have access to several machines of various types,
and often could log in to a nearly-empty machine with
INTERLISP-10 if I chose to, but I do not.  The quality of life
on the Dolphin is too high for me to consider switching back.
I have read notes where folks glibly equate N Dolphins to a
KL10 or a 2060; figures of N=25 or 40 have been mentioned.
For my money, and my time, a figure of about N=3 is right.

Doug Lenat

PS: I could supply timing data on Eurisko, showing it to run
about a factor of 4-5 times slower on a Dolphin than on a 2060,
but that would tend to obscure the majority of the 8 points above,
and would lend an authenticity and weight to the other recent
"benchmarks" that they do not deserve.  It's not that mine would
be more accurate, just that NONE of the tests we're doing is
accurate enough -- using different flag options to the compiler
has produced runtime differences of over an order of magnitude
on a 2060 in runtime of the final compiled code.
-------

∂20-Oct-81  1744	pratt@Diablo (SuNet) 	Benchmarks for Interlisp-VAX
Date: 20 Oct 1981 17:36:24-PDT
From: pratt at Diablo
To: Feigenbaum@AIM, LYNCH@USC-ISIB, RPG@SAIL, Rindfleisch@AIM, balzer@isif,
    cbf@mc, csd.novak@score, csvax.fateman@berkeley, masinter@parc,
    schoen@aim, vanmelle@aim
Subject: Benchmarks for Interlisp-VAX
Cc: DDyer@usc-isib, RBates@usc-isib, Saunders@usc-isib, Voreck@usc-isib

A recent Score bboard message of mine describing the results of some 
benchmarks had a subject line of "Lies, damned lies, and benchmarks."
Benchmarks, like taxes and exams, are unjust, unpopular, but unavoidable.

Here is an excerpt from a note I sent a few days ago to the Stanford Computer 
Science Department Computing Facilities Committee, spelling out the criteria 
I apply to benchmarks.  Note that the criteria are not meant to yield the 
"universal benchmark," which does not exist, but rather to yield programs 
whose behavior on a machine will be suggestive of that machine's day-to-day 
performance.

begin excerpt
-----------------

Here are the criteria I have been using to date in choosing Lisp
benchmarks.

1. The benchmark should solve a problem whose computer solution is frequently 
called for.

2. The programming style used should be one that helps make programs more 
writable, readable, maintainable, and portable.

3.  Subject to criterion 2, the benchmark should implement an efficient 
solution to the problem it solves.

Criterion 1 is to avoid the complaint commonly made about some benchmarks 
that they do not test real world problems.  Criterion 2 attempts to live up
to standards recommended by software engineers based on relative costs of
people and hardware, a ratio that programmers continue to have difficulty
believing in and adjusting to.  Criterion 3 supplements criterion 1 by weeding
out programs that would not arise in practice on account of being too
inefficient.  (For the most realistic benchmarks, criterion 3 should receive
the same degree of emphasis as in typical programming; since this emphasis is
highly variable the criterion may safely be interpreted somewhat vaguely.)

Customer benchmarks can afford to meet criterion 1 more narrowly than general
benchmarks, in that "wide use" can be construed to mean "heavily used by the
customer."  Thus for HPP purchases it makes sense to use HPP programs as
benchmarks, at least up to a point.  Current HPP programs give an accurate 
estimate for immediate applications, but this accuracy starts to drift as 
programs, programmers, and programming styles change.  Thus for long-range 
predictions, programs of general utility, in particular programs that have 
found frequent application over a period of many years in many places and can 
be expected to continue to show up as subroutines in many programs, make for 
more satisfactory benchmarks.

A machine that requires benchmarks not meeting criterion 1 in order to look
good is not tuned for real world problems.  If a programming style that 
defeats criterion 2 is needed for a machine not to look like a dog compared 
to other machines then that machine is not tuned for the economics of today's 
computing milieu.  Defeating criterion 3 to reduce the performance gap 
between machines is like holding Olympic track events in molasses.

----------------
end excerpt

I would add to these criteria the following advice on interpreting benchmark 
timings:

1.  Don't take any time seriously other than real (wall-clock) time.  If you
are on a time-shared computer, you have two variables to contend with: real
time and deviation from typical load.  A one-datum benchmark will measure
real time under typical load conditions, for more detail you can plot real 
time against load factor.  Personal computers are simpler with only one 
variable to worry about, real time.

2.  Don't treat factors of 2 seriously for a single benchmark.  You can 
easily find variations of this magnitude and more in the ratio of the 
performance of two machines, by changing programmer, language, and/or 
benchmark.  Differences between machines only start to become significant 
when you have observed them systematically over a representatively broad 
range of benchmarks, holding constant only those things you are actually 
trying to measure (such as the machine itself, or the machine and a 
particular compiler).

Three simple benchmarks I have been looking at recently were chosen from the 
areas of algebraic manipulation, formal logic, and sorting.  The respective 
functions are:

1.  deriv(exp) which computes the derivative with respect to X of the 
rational expression 'exp' (a rational expression is one involving variables, 
constants, PLUS, DIFFERENCE, TIMES, and QUOTIENT), without attempting any 
simplification of the result;

2.  dnf(wff) which converts the well-formed formula 'wff' (consisting of 
propositional variables combined with NOT, AND, and OR, where AND and OR may 
take any number of arguments) to disjunctive normal form (a list of lists of 
literals, where a typical literal is either the variable P or (NOT P));

3.  sort(L) which sorts list 'L' using merge sort, using lists rather than arrays
to hold intermediate values.

I have coded up my own Maclisp and Interlisp implementations of these and stored
them on <CSD.PRATT> at Score, with respectively .L and .IL suffixes (so that
e.g. the Interlisp sort is called SORT.IL).  I'd be interested in seeing
(a) these exact programs timed on other Lisps
(b) other implementations of these functions similarly timed.
The motivation behind (b) is to reduce the extent to which the benchmarks are
measuring me as well as the machines and compilers.  The reimplementations
should be run on all machines and compilers being benchmarked.

I would also like to see proposed other benchmarks that meet my three criteria
above.  Three benchmarks is a start, but do not begin to span the range of
real-world Lisp applications.  Note that the Takeuchi function fails two of my
criteria: 1 for obvious reasons, 3 because there is a closed form expression
for this function.  The usual recursive implementation of Fibonacci(n) would fail
the same two criteria, failing criterion 3 by being two exponentials slower than
a good algorithm.

It would be nice to include some very large benchmarks.  The main obstacle here is
the high cost of porting large programs between the machines being compared.  To
the extent that this obstacle can be overcome, large benchmarks are most welcome.

	Vaughan Pratt





∂21-Oct-81  0109	RPG  	Criterion 1   
To:   VRP at SU-AI, "#TIMING.MSG[TIM,LSP]" at SU-AI  

I doubt that criterion 1 has any special relevance except to
sound good (or to qualify for equal opportunity employment funds).
Here's why. What does anyone care how long the derivative function takes?
Because he (let's say) wants to do similar things in his program. What
similar things can a person want to do: Traverse list structure, make
a copy, and transform it in some way. The interesting components of
this are traversal, copying, and testing. Traversal involves car and
cdr accesses, copying involves cons, testing involves eq or a type test.

Since no one wants to do derivatives from scratch, the exact benchmark is
irrelevant unless it is totally typical or average. It is much better
to provide profiles of benchmarked or analyzed primitives or operations.
For example: cons, car access, cdr access, fixnum arithmetic, flonum
arithmetic, bignum arithmetic, function call, frame retention (InterLisp),
IO, array access, array creation, array updating, array copying, vector
access, vector creation, vector updating, vector creation, value passing,
multiple value passing, non-local control structures, variable access,
variable updating, function loading, special access, local access, binding
times, garbage collection of cons cells, arrays, fixnums, flonums, bignums,
cdr-coding efficiency, paging time, swapping time, compiling time, compiled
code speed versus interpreted code speed, hierarchy flattening time,
plist access, plist updating, function cell lookup, assignment, stack frame
creation time, etc.

Programs like deriv only test 4 of these in an unintuitive mix. Tak, at least,
is known to test 1 thing only: stack operations and the way the compiler
manages such things. It might also reveal optimizations that the compiler
can do. Tak, you claim, is uninteresting. Yet it tells more information
because it pinpoints its point of impact. Deriv, I'd say, essentially
does: it is CONS intensive.

I don't object to your benchmarks. It's just that I think you do a slight
disservice to the whole endeavor by claiming that you are choosing excellent
benchmarks when you are simply picking convenient ones or something.

Since the audience that we are aiming at shouldn't be solving problems
that are commonly solved, we should measure the various components
and possibly rank the Lisps and machines according to a well-defined
measure combining all aspects.
			-rpg-

∂17-Oct-81  2340	pratt@Diablo (SuNet) 	Fairness
Date: 17 Oct 1981 23:35:38-PDT
From: pratt at Diablo
To: equip, genesereth@score, novak@score
Subject: Fairness

(The following is in response to the Takeuc(h?)i benchmark from RPG.)

Here are the criteria I have been using to date in choosing Lisp
benchmarks.

1. The benchmark should solve a problem whose computer solution is frequently 
called for.

2. The programming style used should be one that helps make programs more 
writable, readable, maintainable, and portable.

3.  Subject to criterion 2, the benchmark should implement an efficient 
solution to the problem it solves.

Criterion 1 is to avoid the complaint commonly made about some benchmarks 
that they do not test real world problems.  Criterion 2 attempts to live up
to standards recommended by software engineers based on relative costs of
people and hardware, a ratio that programmers continue to have difficulty
believing in and adjusting to.  Criterion 3 supplements criterion 1 by weeding
out programs that would not arise in practice on account of being too
inefficient.  (For the most realistic benchmarks, criterion 3 should receive
the same degree of emphasis as in typical programming; since this emphasis is
highly variable the criterion may safely be interpreted somewhat vaguely.)

Customer benchmarks can afford to meet criterion 1 more narrowly than general
benchmarks, in that "wide use" can be construed to mean "heavily used by the
customer."  Thus for HPP purchases it makes sense to use HPP programs as
benchmarks, at least up to a point.  Current HPP programs give an accurate 
estimate for immediate applications, but this accuracy starts to drift as 
programs, programmers, and programming styles change.  Thus for long-range 
predictions, programs of general utility, in particular programs that have 
found frequent application over a period of many years in many places and can 
be expected to continue to show up as subroutines in many programs, make for 
more satisfactory benchmarks.

A machine that requires benchmarks not meeting criterion 1 in order to look
good is not tuned for real world problems.  If a programming style that 
defeats criterion 2 is needed for a machine not to look like a dog compared 
to other machines then that machine is not tuned for the economics of today's 
computing milieu.  Defeating criterion 3 to reduce the performance gap 
between machines is like holding Olympic track events in molasses.

With these criteria in mind, I'd like to defend myself against the objections
that have been raised about one of my Lisp benchmarks consing excessively.
That it was considered excessive surprised me since I thought I had 
implemented differentiation in a pretty standard Lisp style, and pretty 
efficiently at that, meeting all three of my criteria.

To see what happened in the "real world" I went over to Macsyma and took the 
derivative of the same expression used in my benchmark, 3*x*x+a*x*x+b*x+5.  
Macsyma performed around 300 conses, of which somewhere between 150 and 200 
appeared to be attributable to actually taking the derivative, the rest being 
due to parsing and other overhead.  My program performed only 61 conses, the 
lower number probably being attributable to my not attempting any 
simplification of the result.

Conclusion: I see no sustainable objection to my benchmark.

I might add that I chose it, along with two other benchmarks, purely using
my three criteria.  I had no prior expectations that it would exercise one
part of Lisp more than another, although I also did not expect that it would
serve as a universal benchmark.  There is no such thing as a universal 
benchmark; at best you can only hope to have a broad range of representative 
benchmarks.  This is why I had three benchmarks solving problems from
three distinct areas, algebraic manipulation, logic, and sorting.  Lack of time
has prevented me from covering yet more territory, and I am grateful for all
contributed benchmarks from other sources.  However if your contribution 
comes with the remark that I am being unfair in not having a sufficiently 
broad range of benchmarks (as did the Takeuchi benchmark) I will be rather 
piqued; I just don't have the resources to produce a sufficiently
representative range of benchmarks on my own.

I do not think that one should strive for fairness in benchmarking by 
trying to distribute problems according to how well a particular machine 
performs.  Fairness does not mean that everyone should get a prize, but rather
that one judge using methods that lead to informative and accurate judgments.
If it turns out that a set of benchmarks representing a suitably diverse 
range of applications runs poorly on a given machine in comparison to other 
machines, I don't consider it fair to then attempt to put that machine in a 
good light by looking specifically for benchmarks that favor that machine, 
any more than I would consider it fair to look for benchmarks that make the 
machine perform poorly relative to other machines.

In the case of the Takeuchi benchmark I get the feeling that it was chosen more
because it did no consing at all and was therefore likely to perform better 
on the Dolphin than because of any consideration of representativeness.  
Whether or not this was actually the case, I can at least raise the technical 
objection that this benchmark fails my criteria 1 and 3.  (Criterion 3 fails 
because there is a closed-form expression for Takeuchi's function, permitting 
it to be computed in constant time rather than in what appears to be 
exponential time.)

One way to come up with a benchmark that meets my three criteria but that 
should do as well as Takeuchi in making Dolphins look good would be to
implement ASSOC straightforwardly.  This would not make me any happier about 
maintaining a spirit of representativeness, but at least it would dispose of 
my technical objections to the Takeuchi benchmark.

Incidentally, the Takeuchi benchmark consumes 2.1 seconds using Franz Lisp on
Diablo.  (RPG's timings were .83 for Sail, 4.1 for the Foonly, and 11.2 for 
the Dolphin.)  For what it's worth a C version of it ran in 1.35 seconds on 
the Vax and 1.9 seconds on the Sun.

	Vaughan

∂18-Oct-81  2141	pratt@Diablo (SuNet) 	For what it's worth    
Date: 18 Oct 1981 21:40:08-PDT
From: pratt at Diablo
To: RPG@Sail, equip@DIABLO
Subject: For what it's worth

	From: Dick Gabriel <RPG at SU-AI>
	Subject: For what it's worth
	To:   equip at DIABLO  
	
	DERIV coincidentally was a very CONS intensive program. TAK
	is function call intensive and has no CONSing of any kind.
	By `fairness' I meant that it is rare in a `natural' Lisp program
	that CONSing is done in such high percentages. 

My Macsyma data didn't sway you then?  Are you saying that Macsyma is
an unnatural Lisp program or a rare one?

	If TAK in C on the SUN and VAX are interesting, how about TAK in
	FAIL on SAIL?:
		.436 seconds

Using pretty straightforward assembly language on the Sun I measured .70 
seconds.  I'm surprised the Sail/Sun gap is so small, I thought KL-10's were 
supposed to be blindingly fast.  I certainly wasn't expecting the Sun to be 62%
of a KL-10 for a function-call-intensive benchmark!

Several points:
	1. Macsyma IS a natural program: it did a lot of CONSing, but
I doubt that the percentage of CONSing to other things is anywhere
near as high as in VRP's DERIV program. This is because MACSYMA does a lot
of stuff while taking derivatives (for example, displaying the answer).
I write much Lisp code, and my code certainly does not mention several
conses per line, as Pratt's DERIV function does. The Macsyma derivative
is not Pratt's.

Any benchmark such as APPEND and DERIV is pathological. I never see such
code AS THE NORM. I see: type testing, MEMQ's, EQ's, COND's, lambda-binding,
array/vector access/assignment, function calls. The programs I deal with
do a lot of CONSing, but not at the rate that Pratt's does. If his DERIV
is natural, then the Dolphin is 260 times slower than SAIL. No one else
reports that. QED.

	2. The `art' of benchmarking is subtle. For example, what does it
mean to ``time'' a benchmark? The assembly language code I wrote was
loaded in MacLisp, and I used the timing mechanism I always do to
time things (to be consistent). The mechanism for measuring times
on SAIL is the RUNTIM UUO, which is known to measure a quantity related
to actual EBOX execution time. I'm not sure what it measures, since it
appears to count as EBOX time for my job the code run at interrupt level
while I am active (such as interrupts for character input from any user).
It may count cache filling time. Recall that the memory on SAIL consists
of 256k of 900 nanosecond memory and 1 meg of 3 microsecond memory. Until
the cache is filled I'm losing. Of course, I get charged for the execution
of RUNTIM. With a benchmark so short, this is significant.

The timing methodology counted several Lisp function calls, so
I eliminated that, redid it, and got 380 milliseconds. Even at that
I don't know what I measured. When I write up the Lisp Evaluation
results, I will read the system code, study the hardware, and try to
relate my results to some reality. Now that you know what I know
about my measurements, what did Pratt measure?
			-rpg-
∂18-Oct-81  2254	RPG@Sail (SuNet) 	Several points:       
Date: 18 Oct 1981 2246-PDT
From: Dick Gabriel <RPG at SU-AI>
Subject: Several points:    
To: equip at DIABLO

	1. Macsyma IS a natural program: it did a lot of CONSing, but
I doubt that the percentage of CONSing to other things is anywhere
near as high as in VRP's DERIV program. This is because MACSYMA does a lot
of stuff while taking derivatives (for example, displaying the answer).
I write much Lisp code, and my code certainly does not mention several
conses per line, as Pratt's DERIV function does. The Macsyma derivative
is not Pratt's.

Any benchmark such as APPEND and DERIV is pathological. I never see such
code AS THE NORM. I see: type testing, MEMQ's, EQ's, COND's, lambda-binding,
array/vector access/assignment, function calls. The programs I deal with
do a lot of CONSing, but not at the rate that Pratt's does. If his DERIV
is natural, then the Dolphin is 260 times slower than SAIL. No one else
reports that. QED.

	2. The `art' of benchmarking is subtle. For example, what does it
mean to ``time'' a benchmark? The assembly language code I wrote was
loaded in MacLisp, and I used the timing mechanism I always do to
time things (to be consistent). The mechanism for measuring times
on SAIL is the RUNTIM UUO, which is known to measure a quantity related
to actual EBOX execution time. I'm not sure what it measures, since it
appears to count as EBOX time for my job the code run at interrupt level
while I am active (such as interrupts for character input from any user).
It may count cache filling time. Recall that the memory on SAIL consists
of 256k of 900 nanosecond memory and 1 meg of 3 microsecond memory. Until
the cache is filled I'm losing. Of course, I get charged for the execution
of RUNTIM. With a benchmark so short, this is significant.

The timing methodology counted several Lisp function calls, so
I eliminated that, redid it, and got 380 milliseconds. Even at that
I don't know what I measured. When I write up the Lisp Evaluation
results, I will read the system code, study the hardware, and try to
relate my results to some reality. Now that you know what I know
about my measurements, what did Pratt measure?
			-rpg-

∂19-Oct-81  0935	RINDFLEISCH@SUMEX-AIM (SuNet) 	FYI - Other Lisp Timing Thrashes  
Date: 19 Oct 1981 0926-PDT
From: Rindfleisch at SUMEX-AIM
Subject: FYI - Other Lisp Timing Thrashes
To: Equip at SU-HPP-VAX
cc: [SUMEX] at SUMEX-AIM, ETHERNET at SUMEX-AIM, DEV at SUMEX-AIM, GRP:

   1   18 Oct  Masinter at PARC-MAXC some more races out of the past
   2   18 Oct  Masinter at PARC-MAXC timings - fyi


1 -- ************************
Mail-from: ARPANET host PARC-MAXC rcvd at 18-Oct-81 1249-PDT
Date: 18 Oct 1981 10:12 PDT
From: Masinter at PARC-MAXC
Subject: some more races out of the past
To: Rindfleisch@sumex-aim


---------------------------

Mail-from: Arpanet host MIT-MC rcvd at 26-FEB-81 2243-PST
Date: 26 Feb 1981 14:42:52-PST
From: CSVAX.fateman at Berkeley
To: CSVAX.jkf@Berkeley, jlk@mit-mc, lisp-forum@mit-mc, rz@mit-mc
Cc: CSVAX.fateman@Berkeley

 ←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←←
 |             | UCILISP | INTERLISP | MACLISP |Franz/VAX| 
 |-------------+---------+-----------+---------+---------|
 | Interpreter |   57.0  |    26.0   |  22.8   |  65.0   |
 |-------------+---------+-----------+---------+---------|
 | Compiler    |    2.90 |    15.0   |   0.69  | 1.1 **  |
 |-------------+---------+-----------+---------+---------|

 Times are for (TAK 4 2 0), where TAK is an interesting function
 defined by Mr. Ikuo Takeuchi.
 (DEFUN TAK (X Y Z)
	(COND ((GREATERP X Y)
	       (TAK (TAK (SUB1 X) Y Z) 
		    (TAK (SUB1 Y) Z X)
		    (TAK (SUB1 Z) X Y) ))
	      (T Y) ))
(**) 5.3 with (1- x) etc [no other declarations, so greaterp is closed comp.]
     4.1 with local function declaration (fast subroutine call)
     1.1 with > open compiled 
     times on a VAX 11/780 at Berkeley, Feb. 26, 1981


------------------------------------------------------------



2 -- ************************
Mail-from: ARPANET host PARC-MAXC rcvd at 18-Oct-81 1249-PDT
Date: 18 Oct 1981 09:55 PDT
From: Masinter at PARC-MAXC
Subject: timings - fyi
To: LispGroup↑, Rindfleisch@sumex-aim
Reply-To: Masinter

---------------------------

Mail-from: Arpanet host MIT-MC rcvd at 1-MAR-81 2221-PST
Date: 2 March 1981 00:55-EST
From: Charles Frankston <CBF at MIT-MC>
Subject: timings
To: CSVAX.fateman at BERKELEY
cc: LISP-FORUM at MIT-MC, masinter at PARC-MAXC, RWS at MIT-XX,
    guttag at MIT-XX

It is rather obvious that the timings you distributed are wall times for
the Lisp Machine, whereas the Vax and MC times count only time spent
directly executing code that is considered part of Macsyma.  Ie. the
Vax and MC times exclude not only garbage collection, but operating system
overhead, disk i/o and/or paging, time to output characters to terminals, etc.

I submit comparing wall times with (what the Multics people call) "virtual
CPU" time, is not a very informative exercise.  I'm not sure if the Lisp
Machine has the facilities to make analogous measurements, but everyone
can measure wall time, and in some ways that's the most useful comparison.
Is anyone willing to try the same benchmarks on the Vax and MC with just
one user on and measuring wall times?

Also, are there yet any Lisp machines with greater than 256K words?  No
one would dream of running Macsyma on a 256K word PDP10 and I presume that
goes the same for a 1 Megabyte Vax.  The Lisp Machine may not have a time
sharing system resident in core, but in terms of amount of memory needed
for operating system overhead, the fanciness of its user interface
probably more than makes up for that.  I'll bet another 128K words of
memory would not be beyond the point of diminishing returns, insofar
as running Macsyma.

Lastly, the choice of examples.  Due to internal Macsyma optimizations,
these examples have a property I don't like in a benchmark.  The timings
for subsequent runs in the same environment differ widely from previous
runs.  It is often useful to be able to factor out setup times from a
benchmark.  These benchmarks would seem to run the danger of being
dominated by setup costs.  (Eg. suppose disk I/O is much more expensive
on one system; that is probably not generally interesting to a Macsyma user,
but it could dominate benchmarks such as these.)

I would be as interested as anyone else in seeing the various lisp systems
benchmarked.  I hope there is a reasonable understanding in the various
Lisp communities of how to do fair and accurate benchmarking, else the results will be
worse than useless, they will be damaging.

------------------------------------------------------------

-------

∂19-Oct-81  1045	pratt@Diablo (SuNet) 	Several points:   
Date: 19 Oct 1981 10:43:49-PDT
From: pratt at Diablo
To: RPG@Sail, equip@DIABLO
Subject: Several points:

[Seems to me benchmarking generates more debate than information.  -vrp]

	From: Dick Gabriel <RPG at SU-AI>
	
		1. Macsyma IS a natural program: it did a lot of CONSing, but
	I doubt that the percentage of CONSing to other things is anywhere
	near as high as in VRP's DERIV program. This is because MACSYMA does a lot
	of stuff while taking derivatives (for example, displaying the answer).

The measurements I made of Macsyma's differentiator did not include the work
done during display, nor during parsing.  Nor should it if you are just 
comparing differentiation programs.  What is the "lot of stuff" Macsyma does 
WHILE taking derivatives?

	I write much Lisp code, and my code certainly does not mention several
	conses per line, as Pratt's DERIV function does.

Ok, let's see your version of DERIV.  I'll be interested to see how you manage
to use fewer conses per line.  No fair merely spreading the code over more
lines.

							  The Macsyma 
	derivative is not Pratt's.

Your argument here seems to be that because you do something Macsyma does it
too.

We can resolve the question of what Macsyma does by looking at the Macsyma 
code for DIFF, a copy of which I have requested from Jeff Golden.  (Maybe I 
should have asked Fateman, on the principle that one goes to the Soviet 
embassy to make casual inquiries about US military secrets.  I heard Moses 
was rather upset that Fateman had ported Macsyma to Franz Lisp.)

		2. ...[on the meaning of time]...
	.
	.
	.
	Now that you know what I know
	about my measurements, what did Pratt measure?

Depends on the machine, but in all cases my wristwatch is the final authority.

Sun and Dolphin:	wristwatch time over a large number of runs.  (On the
			Dolphin this seems to agree with the time returned by
			InterlispD's TIME function.)

Vax:			user cpu time as returned by the 'times' kernel call.
			The user cpu time has the following two properties:
			(1) I have observed little variation of this parameter
			over a wide range of system loads, cache usage
			considerations notwithstanding.
			(2) For very low system loads I have seen it
			come to within 90% or so of wristwatch time.
			These two observations together imply that the 'times'
			kernel call is a reliable indicator of user cpu time.

∂19-Oct-81  1143	RPG@Sail (SuNet) 	Long, silly response to Vaughn Pratt      
Date: 19 Oct 1981 1135-PDT
From: Dick Gabriel <RPG at SU-AI>
Subject: Long, silly response to Vaughn Pratt   
To: equip at DIABLO

In this message I reply to the cogent parts of Pratt's comments. The
non sequiturs in his message, which will be obvious when you briefly read
this, will be ignored, unless misunderstandings persist. Most
of you may want to delete this message now.

Ok Vaughn. Let me state this simply, so that you can understand it.

1. Your DERIV program has a higher percentage of conses than `natural' code.

2. `Natural code' means code that occurs naturally in the day-to-day
work of actual Lisp programmers. 

3. `Natural code' does not mean the `natural' code for DERIV. (Which is
EQ to yours, and which I stated to you on several occasions already in
private).

4. Since you stated the fact that Macsyma differentiation took more conses,
I assumed that meant that it used a different program (and possibly did other
things too).

5. I never stated that your DERIV took excessive CONSes in the sense that
it was programmed badly. It, like APPEND, requires CONSes to copy the
structure, which I assume would be part of the specification.

6. Here is some `naturally' occurring code, written by Guy Steele and
myself.  It was randomly taken from pages 31, 41, and 59 (3.14159) of
ANALYZE.LSP[NIL,S1] I will point out the CONSes.  Of the approximately 153
lines below about 12 have CONSes on them. Most data structure operations
here are vector references and assignments. There are a lot of lambda's
(disguised as LETs) and control structures.  The rest of you can delete
the remainder of this message as I assume you understand my point. Vaughn,
read on:

∂19-Oct-81  1545	Jeff Rubin <JBR at S1-A> 
Date: 19 Oct 1981 1544-PDT
From: Jeff Rubin <JBR at S1-A>
To:   rpg at SU-AI

 ∂19-Oct-81  1141	RPG   via SU-AI 	RUNTIM  
can you give me the poop on what RUNTIM UUO measures on SAIL? How much
of other users, context switching, etc, do I get charged for there.
Technical details welcome.
			-rpg-

It measures EBOX milliseconds.  You might possibly be getting charged for
somebody else's spacewar or interrupt level.  I don't really remember.
You get charged for some amount of context switching and scheduling
(possibly even figuring out that it should just run you again next).
--jeff

∂21-Oct-81  1325	RPG  	Wall time
To:   pratt at DIABLO, "#TIMING.MSG[TIM,LSP]" at SU-AI    

You state:

-----
1.  Don't take any time seriously other than real (wall-clock) time.  If you
are on a time-shared computer, you have two variables to contend with: real
time and deviation from typical load.  A one-datum benchmark will measure
real time under typical load conditions, for more detail you can plot real 
time against load factor.  Personal computers are simpler with only one 
variable to worry about, real time.
-----

There are, as usual, a number of points that you slough over in this statement
which strikes me as having been given in the spirit of an aged philosopher to
young upstarts.

First, one can and must take every time reported seriously as long as there
is sufficient other material to evaluate the meaning of that time well.
For example, consider Tak(18,12,6). Suppose that there is no way to increase
the duration of a single run (for whatever reason). Since the run is so short
(on a reasonable machine) the only way to gather time via `wall clock' is
with multiple runs. In this case, unless an analysis is done on the cost
of the multiple timing control structures, you have no idea what else you
are measuring. 

Second, on a timesharing machine, the absolute cpu time (including typical
memory usage [cache misses]) along with an independent analysis of the
impact of load on real time and memory contention will provide more and
better information than a compendium of real time versus load for each
benchmark. A further piece of data would be cost of context switching
and other interrupts (and spacewar processes on SAIL). Without all of this
the timings are useless.

Third, you have included in your objective advice some personal opinion.
Namely, that personal computers are inherently better than timeshared ones.
This is under the guise of `real time' being the only serious measurement
coupled with the statement that the personal machine is simpler to measure here.
Doug Lenat made the same mistake that everyone seems to make, which is that
the personal machine is more available, and that downtime doesn't affect
them. Since you want to consider the load as impacting the worth of the
timeshared system, let me propose that the following `wall clock test' on
a normally populated personal machine environment versus a normally populated
timesharing environment. We consider 100 random times. In each case the test
subject stands outside the door. On a signal he is to enter the room with
the terminal or computer, log in, and run the program. We measure transit time,
login time, loadup time and subtract them from the measurements. If someone
is at the personal machine, you wait and the clock ticks. If you
cannot log into the timeshared machine, the clock ticks. Neither may say anything
to anyone. We do this so that all time slots are represented appropriately.

I won't complicate the matter by including the incremental funding situation
when ARPA comes to see the demo of your system, and in case Ohlander watches
the one cylinder personal machine stagger through its paces while the huge timeshared
machine, with everyone gratefully off while the money is being considered,
15 MIPS' its way through the demo.

In this sense, real time on a personal machine means real time when you have
the machine in your hot little hands. So what is different about CPU time
when it is simply the measure of real time when you have the CPU in your
hot little hands? Only the scale is different. And you CAN get the timeshared
machine to yourself at critical times, and at night (which is when the average
grad student gets to use a Dolphin). If you are considering that everyone
get a personal machine, then with economics in mind, we ought to calculate the
CPU percentage of a very powerful timeshared machine as an exercise.

Lenat flames about availability. The man hours of availability of a mainframe
is much higher than a normally populated personal environment.

			-rpg-

∂22-Oct-81  2009	George J. Carrette <GJC at MIT-MC> 	timing tests and benchmarks. 
Date: 22 October 1981 20:40-EDT
From: George J. Carrette <GJC at MIT-MC>
Subject:  timing tests and benchmarks.
To: RPG at SU-AI

Thanks. I'll give these a try. The Franz LOCF declaration
is a total crock only put into the language for use in benchmarks
it seems as you get ridiculously poor debugging if you use it.

One thing I am curious about, either quantitatively or just your feel,
is how many Lisp users (of average color) with how much memory is
the maximum on a 780 running any reasonable Lisp. I assume that NIL
will be the first reasonable Lisp on a Vax, so perhaps you can answer this.
			-rpg-
Thanks for your help.
∂10-Dec-81  1050	Jerry Roylance <GLR at MIT-AI> 	LISPM Array Timings    
Date: 10 December 1981 13:26-EST
From: Jerry Roylance <GLR at MIT-AI>
Subject: LISPM Array Timings
To: BUG-LISPM at MIT-AI
cc: GLR at MIT-AI, GJS at MIT-AI, LISPTiming at SU-AI


John Batali and I have made some simple measurements that
are given here for general information.

;; On CADR-2
(setq array				; takes 1738 seconds
      (*array nil 'fixnum 5000000))

(do ((i 0 (1+ i)))			; takes 1019 seconds
    ((>= i 5000000))
  (setf (arraycall fixnum array i)
	(- 5000000 i)))

(sort-grouped-array array 5 #'<)	; takes 6077 seconds

On CADR-8 we made a loop similar to the 1019 second one above
except it had explicit calls to PAGE-IN-ARRAY and PAGE-OUT-ARRAY
every 10000 elements and only changed every 5th element by
writing a random number into it.
Without the explicit paging, the loop took about 1100 seconds;
with the paging, about 150 seconds.  The times are approximate
because the calls to RANDOM (about 350 seconds) have to be
subtracted.

We should get some more timing information in the next few days.

∂11-Dec-81  1215	David A. Moon <MOON at MIT-MC> 	LISPM Array Timings    
Date: 11 December 1981 15:13-EST
From: David A. Moon <MOON at MIT-MC>
Subject: LISPM Array Timings
To: GLR at MIT-AI
cc: BUG-LISPM at MIT-MC, GJS at MIT-AI, LISPTiming at SU-AI

It is a known bug that arrays larger than the size of main memory
don't work very well, in particular they have to be paged in at
least twice to create them.  Fixing this requires remodularizing
part of the microcode, which is why it hasn't been done yet.  This
has been discussed over (bug lispm) several times in the past.
I don't see what purpose is served by timing things that are known
to be broken.

∂16-Dec-81  0937	Guy.Steele at CMU-10A 	TAK for S-1 
Date: 16 December 1981 1214-EST (Wednesday)
From: Guy.Steele at CMU-10A
To: rpg at SU-AI
Subject:  TAK for S-1
Message-Id: <16Dec81 121440 GS70@CMU-10A>

Attached is the S1C file for TAK.  Maybe you can adapt the code
to run on the Mark 1?
--Q
--------------------------------------------
;Dribble file from S-1 LISP compiler for function TAK

;See user-supplied code?: 

(LAMBDA (X Y Z) 
  (COND ((NOT (<& Y X)) Z)
	(T (TAK (TAK (1-& X) Y Z)
		(TAK (1-& Y) Z X)
		(TAK (1-& Z) X Y)))))

;See initial alpha-conversion (*INITIAL-VERSION*)?: 
(LAMBDA (X Y Z) 
  (IF (<& Y X)
      (IF 'T
	  (TAK (TAK (1-& X) Y Z) (TAK (1-& Y) Z X) (TAK (1-& Z) X Y))
	  'NIL)
      Z))

;Trace optimizer?: 
;***** Optimizing this form:
(IF 'T
    (TAK (TAK (1-& X) Y Z) (TAK (1-& Y) Z X) (TAK (1-& Z) X Y))
    'NIL)
;*** to be this form:
(TAK (TAK (1-& X) Y Z) (TAK (1-& Y) Z X) (TAK (1-& Z) X Y))
;***** courtesy of META-IF-LITERAL

;See result of optimization (*META-VERSION*)?: 
(LAMBDA (X Y Z) 
  (IF (<& Y X)
      (TAK (TAK (1-& X) Y Z) (TAK (1-& Y) Z X) (TAK (1-& Z) X Y))
      Z))

;There are 33 TN's.
;See TN packing?: 
;RT registers:
;RTA:    #4 [12:13/12:13]  	SIZE=1  PTRP=T  ORDER=21  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=RT  PREFS=(5)  CONFLICTS=()
;           JUMP ISTN of OWNER=(<& Y X)
;       #32 [71:72/71:72]  	SIZE=1  PTRP=()  ORDER=22  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=RTPREF  PREFS=(33 31)  CONFLICTS=()
;           SWFIX ISTN of OWNER=(1-& Z)
;       #24 [48:49/48:49]  	SIZE=1  PTRP=()  ORDER=23  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=RTPREF  PREFS=(25 23)  CONFLICTS=()
;           SWFIX ISTN of OWNER=(1-& Y)
;       #16 [25:26/25:26]  	SIZE=1  PTRP=()  ORDER=24  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=RTPREF  PREFS=(17 15)  CONFLICTS=()
;           SWFIX ISTN of OWNER=(1-& X)
;       #33 [68:69/68:69]  	SIZE=1  PTRP=()  ORDER=28  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=32  PREFS=(32)  CONFLICTS=()
;           SWFIX WANTTN of OWNER=Z, son of (1-& Z)
;       #25 [45:46/45:46]  	SIZE=1  PTRP=()  ORDER=29  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=24  PREFS=(24)  CONFLICTS=()
;           SWFIX WANTTN of OWNER=Y, son of (1-& Y)
;       #17 [22:23/22:23]  	SIZE=1  PTRP=()  ORDER=30  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=RTA  REASON=16  PREFS=(16)  CONFLICTS=()
;           SWFIX WANTTN of OWNER=X, son of (1-& X)

;Scratch memory: none.

;Pointer memory: none.

;Scratch registers:
;I:      #8 [8:10/8:10]  	SIZE=1  PTRP=()  ORDER=31  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=I  REASON=ANY  PREFS=()  CONFLICTS=()
;           SWFIX WANTTN of OWNER=X, son of (<& Y X)
;J:      #6 [5:9/5:9]  	SIZE=1  PTRP=()  ORDER=32  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=SCRATCH-REG  ISLOC=J  REASON=ANY  PREFS=()  CONFLICTS=(8)
;           SWFIX WANTTN of OWNER=Y, son of (<& Y X)

;Pointer registers:
;A:      #5 [14:15/14:15]  	SIZE=1  PTRP=T  ORDER=26  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=POINTER-REG  ISLOC=A  REASON=ANY  PREFS=(4)  CONFLICTS=()
;           JUMP WANTTN of OWNER=(<& Y X)

;Arguments:
;*       #0 [0:75/0:75]  	SIZE=1  PTRP=T  ORDER=2  WEIGHT=4  DEPENDENTS=()
;           WANTLOC=ARGUMENT  ISLOC=0  REASON=ARGUMENT  PREFS=()  CONFLICTS=()
;           Random TN of OWNER=(LAMBDA (X Y Z) (IF # # Z))
;*       #1 [1:78/1:78]  	SIZE=1  PTRP=T  ORDER=1  WEIGHT=4  DEPENDENTS=()
;           WANTLOC=ARGUMENT  ISLOC=1  REASON=ARGUMENT  PREFS=()  CONFLICTS=()
;           Random TN of OWNER=(LAMBDA (X Y Z) (IF # # Z))
;*       #2 [2:97/2:67]  	SIZE=1  PTRP=T  ORDER=0  WEIGHT=4  DEPENDENTS=()
;           WANTLOC=ARGUMENT  ISLOC=2  REASON=ARGUMENT  PREFS=()  CONFLICTS=()
;           Random TN of OWNER=(LAMBDA (X Y Z) (IF # # Z))

;Stack locations:
;*      #12 [16:87/16:87]  	SIZE=1  PTRP=T  ORDER=20  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=0  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=TAK, son of (TAK (TAK # Y Z) (TAK # Z X) (TAK # X Y))
;*      #14 [18:34/18:34]  	SIZE=1  PTRP=T  ORDER=19  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=6  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=TAK, son of (TAK (1-& X) Y Z)
;*      #15 [27:35/27:35]  	SIZE=1  PTRP=T  ORDER=7  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=12  REASON=STACK  PREFS=(16)  CONFLICTS=()
;           POINTER WANTTN of OWNER=(1-& X)
;*      #19 [30:36/30:36]  	SIZE=1  PTRP=T  ORDER=18  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=13  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=Y, son of (TAK (1-& X) Y Z)
;*      #20 [33:37/33:37]  	SIZE=1  PTRP=T  ORDER=17  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=14  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=Z, son of (TAK (1-& X) Y Z)
;*      #13 [39:88/39:88]  	SIZE=1  PTRP=T  ORDER=8  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=6  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=(TAK (1-& X) Y Z)
;*      #22 [41:57/41:57]  	SIZE=1  PTRP=T  ORDER=16  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=7  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=TAK, son of (TAK (1-& Y) Z X)
;*      #23 [50:58/50:58]  	SIZE=1  PTRP=T  ORDER=5  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=13  REASON=STACK  PREFS=(24)  CONFLICTS=()
;           POINTER WANTTN of OWNER=(1-& Y)
;*      #27 [53:59/53:59]  	SIZE=1  PTRP=T  ORDER=15  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=14  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=Z, son of (TAK (1-& Y) Z X)
;*      #28 [56:60/56:60]  	SIZE=1  PTRP=T  ORDER=14  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=15  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=X, son of (TAK (1-& Y) Z X)
;*      #21 [62:89/62:89]  	SIZE=1  PTRP=T  ORDER=6  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=7  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=(TAK (1-& Y) Z X)
;*      #30 [64:80/64:80]  	SIZE=1  PTRP=T  ORDER=13  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=8  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=TAK, son of (TAK (1-& Z) X Y)
;*      #31 [73:81/73:81]  	SIZE=1  PTRP=T  ORDER=3  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=14  REASON=STACK  PREFS=(32)  CONFLICTS=()
;           POINTER WANTTN of OWNER=(1-& Z)
;*      #35 [76:82/76:82]  	SIZE=1  PTRP=T  ORDER=12  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=15  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=X, son of (TAK (1-& Z) X Y)
;*      #36 [79:83/79:83]  	SIZE=1  PTRP=T  ORDER=11  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=16  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN of OWNER=Y, son of (TAK (1-& Z) X Y)
;*      #29 [85:90/85:90]  	SIZE=1  PTRP=T  ORDER=4  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=8  REASON=STACK  PREFS=()  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=(TAK (1-& Z) X Y)
;*      #11 [92:93/92:93]  	SIZE=1  PTRP=T  ORDER=9  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=0  REASON=STACK  PREFS=(10)  CONFLICTS=()
;           POINTER ISTN of OWNER=(TAK (TAK # Y Z) (TAK # Z X) (TAK # X Y))
;*      #10 [94:95/94:95]  	SIZE=1  PTRP=T  ORDER=25  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=RESULT  REASON=3  PREFS=(11 3)  CONFLICTS=()
;           POINTER WANTTN of OWNER=(TAK (TAK # Y Z) (TAK # Z X) (TAK # X Y))
;*      #37 [98:99/18:19]  	SIZE=1  PTRP=T  ORDER=27  WEIGHT=0  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=RESULT  REASON=3  PREFS=(3)  CONFLICTS=()
;           POINTER WANTTN of OWNER=Z, son of (IF (<& Y X) (TAK # # #) Z)
;*       #3 [100:102/96:98]  	SIZE=1  PTRP=T  ORDER=10  WEIGHT=1  DEPENDENTS=()
;           WANTLOC=STACK  ISLOC=RESULT  REASON=STACK  PREFS=(37 10)  CONFLICTS=()
;           POINTER WANTTN/ISTN of OWNER=(IF (<& Y X) (TAK # # #) Z)


;TN locations by ID number:
;  #0=ARG 0            #8=I               #16=RTA             #24=RTA             #32=RTA            
;  #1=ARG 1           #10=STACK RESULT    #17=RTA             #25=RTA             #33=RTA            
;  #2=ARG 2           #11=STACK 0         #19=STACK 13        #27=STACK 14        #35=STACK 15       
;  #3=STACK RESULT    #12=STACK 0         #20=STACK 14        #28=STACK 15        #36=STACK 16       
;  #4=RTA             #13=STACK 6         #21=STACK 7         #29=STACK 8         #37=STACK RESULT   
;  #5=A               #14=STACK 6         #22=STACK 7         #30=STACK 8        
;  #6=J               #15=STACK 12        #23=STACK 13        #31=STACK 14       

;TN locations by order of packing:
;  0=#2 ARG 2             9=#11 STACK 0         18=#19 STACK 13        27=#37 STACK RESULT   
;  1=#1 ARG 1            10=#3 STACK RESULT     19=#14 STACK 6         28=#33 RTA            
;  2=#0 ARG 0            11=#36 STACK 16        20=#12 STACK 0         29=#25 RTA            
;  3=#31 STACK 14        12=#35 STACK 15        21=#4 RTA              30=#17 RTA            
;  4=#29 STACK 8         13=#30 STACK 8         22=#32 RTA             31=#8 I               
;  5=#23 STACK 13        14=#28 STACK 15        23=#24 RTA             32=#6 J               
;  6=#21 STACK 7         15=#27 STACK 14        24=#16 RTA            
;  7=#15 STACK 12        16=#22 STACK 7         25=#10 STACK RESULT   
;  8=#13 STACK 6         17=#20 STACK 14        26=#5 A               

;See code?: 
(S1LAP TAK)
(DEFINE %SETUP (%FUNCTION-NAME)
  ((ALLOC 6) T2 (SP 24))
  (MOV (SP -24) (REF (@ (QUOTE %FUNCTION-NAME)) (SQ *:SQ-SYMBOL-FUNCTION-CELL) S)))
(DEFINE %CALL (%TYPE %NARGS)
  ((MOVP P A) FP (PR SP (- 128 (+ (- *:FRAME-ARGUMENTS *:FRAME-RETURN-VALUE) (* %NARGS 4)))))
  ((MOVP P A) CP (PR FP *:FRAME-SPARE-PC-SLOT))
  (MOV RTA (? (POINTER %TYPE %NARGS)))
  (JSP (PR FP *:FRAME-RETURN-PC) (@ (REF CP (* *:PROCEDURE-CODE 4)))))
	((JMPZ NEQ Q) RTA-Q0 (@ (REF SQ *:SQ-WRONG-TYPE-OF-FUNCTION)))
						;Jump if other than single pointer value desired
	((JMPZ NEQ) (? -3 RTA) (SQ *:SQ-WRONG-NUMBER-OF-ARGUMENTS))
						;Error if not exactly 3 arguments
	((SKP LSS S) (FP -100) (FP -104) L0018)	;Branch if (<& Y X)
	(MOV (FP *:FRAME-RETURN-VALUE) (FP -96))
	(JMPA NIL L0019)
L0018 	(%SETUP TAK)				;Set up to call TAK
	(%SETUP TAK)				;Set up to call TAK
	(DEC RTA (FP -104))			;(1-& X)
	((PUSH UP) SP RTA)
	((PUSH UP) SP (FP -100))
	((PUSH UP) SP (FP -96))
	(%CALL 0 3)				;Call for (TAK (1-& X) Y Z)
	(%SETUP TAK)				;Set up to call TAK
	(DEC RTA (FP -100))			;(1-& Y)
	((PUSH UP) SP RTA)
	((PUSH UP) SP (FP -96))
	((PUSH UP) SP (FP -104))
	(%CALL 0 3)				;Call for (TAK (1-& Y) Z X)
	(%SETUP TAK)				;Set up to call TAK
	(DEC RTA (FP -96))			;(1-& Z)
	((PUSH UP) SP RTA)
	((PUSH UP) SP (FP -104))
	((PUSH UP) SP (FP -100))
	(%CALL 0 3)				;Call for (TAK (1-& Z) X Y)
	(%CALL 0 3)				;Call for (TAK @ @ @)
	((POP UP) (FP *:FRAME-RETURN-VALUE) SP)	;Store away result of call
L0019 	((MOV D D) T1 CP)			;Function exit:
	((MOVMS 3) TP (FP *:FRAME-OLD-TP))	; restore TP, CP, and FP for caller,
	(RETSR T2 (T2 *:FRAME-OLD-TP))		; and return, saving spare PC slot in T1.
()

;See statistics?: 
;     35  number of internal-format program nodes created
;      0  CASEQ of a literal reduces to a clause
;      0  CASEQ of a PROGN becomes PROGN of a CASEQ
;      0  CASEQ of a LET becomes LET of a CASEQ
;      0  CASEQ of an IF becomes IF with CASEQ as one arm
;      0  CASEQ of an IF becomes IF with clauses for both arms
;      0  CASEQ of an IF becomes a single clause
;      0  IF of an IF is involuted (anchor pointing)
;      1  IF of a literal has dead arm eliminated
;      0  IF of a PROGN becomes PROGN of an IF
;      0  IF of a LET becomes LET of an IF
;      0  statements eliminated from a PROGBODY
;      0  statements eliminated from a PROGN
;      0  PROGN of one statement simplified to the statement
;      0  one PROGN within another is flattened out
;      0  a CALL to a primop with literal arguments is folded
;      0  a call with one argument is reduced to that argument
;      0  a call with a literal idempotent argument is reduced
;      0  a literal identity argument is eliminated from a call
;      0  a call with some literal arguments is partially folded
;      0  a call with a literal nilpotent argument is reduced
;      0  a call with one argument is reduced to a nilpotent
;      0  a call with more than two arguments is reduced to dyadic calls
;      0  FUNCALL of a known PRIMOP is made a PRIMOP CALL
;      0  a CALL to a known function with literal arguments is folded
;      0  &OPTIONAL parameter of a LAMBDA in a CALL is made &REQUIRED
;      0  &REST parameter of a LAMBDA in a CALL is made &REQUIRED
;      0  unreferenced &REQUIRED parameter is flushed
;      0  unreferenced argument with no side effects is flushed
;      0  ((LAMBDA () FOO)) becomes FOO
;      0  substitutions of an argument for a parameter
;      0  attempts to propagate substitution into a LET body
;      0  successes at propagating substitution into a LET body
;      0  replacement of a SETQ of an unreferenced variable by the SETQ body
;      0  the statements of a PROGN are permuted
;      0  PDLNUM relaxation reanalysis of a CALL-LAMBDA
;     38  number of TN's generated
;      5  number of TN's removed by REMTN
;     32  number of instructions and macro calls emitted
;      2  number of tags emitted
;      8  number of S1LAP macro calls emitted
;     17  number of XOP instructions emitted
;      3  number of TOP instructions emitted
;      3  number of JOP instructions emitted
;      1  number of SOP instructions emitted

;End of S-1 compiler dump file PS:<S1LISP.COMPILER>TAK.S1C

∂18-Dec-81  2112	Earl A. Killian            <Killian at MIT-Multics> 	tak    
Date:   19 December 1981 0007-est
From:   Earl A. Killian            <Killian at MIT-Multics>
Subject:        tak
To:     Guy Steele at CMUa, RPG at SAIL

CBF said you were compiling TAK for the Mark IIA, so I thought I'd send
you Amber's Pascal's code for it.  Here it is:

                           ;;;     START BLOCK
00000004                   tak:
00000004  6203 0140 3713           ALLOC.7        r24, (sp)13*4
00000010  6172 3774 0377           ENTRY          (sp)-4*4, (pc)-1*4                 ; ., tak
00000014  5343 0160 0040           MOV.S.S        r28, r8                            ; x, .
00000020  5343 0154 0044           MOV.S.S        r27, r9                            ; y, .
00000024  5343 0150 0050           MOV.S.S        r26, r10                           ; z, .
                                                                                     ; LINE 3
00000030  0253 0154 0160           SKP.LSS.S      r27, r28, L61                      ; y, x, 44
00000034                   L96:
                                                                                     ; LINE 4
00000034  5343 0054 0150           MOV.S.S        r11, r26                           ; tak, z
00000040  4111 0000 0023           JMPA           L66                                ; 154
00000044                   L61:
                                                                                     ; LINE 5
00000044  4267 0040 0160           DEC.S          r8, r28                            ; ., x
00000050  5343 0044 0154           MOV.S.S        r9, r27                            ; ., y
00000054  5343 0050 0150           MOV.S.S        r10, r26                           ; ., z
00000060  4177 3676 7765           CALL           (fp)-2*4, tak                      ; ., 4
00000064  5343 0144 0054           MOV.S.S        r25, r11
00000070  4267 0040 0154           DEC.S          r8, r27                            ; ., y
00000074  5343 0044 0150           MOV.S.S        r9, r26                            ; ., z
00000100  5343 0050 0160           MOV.S.S        r10, r28                           ; ., x
00000104  4177 3676 7760           CALL           (fp)-2*4, tak                      ; ., 4
00000110  5343 0140 0054           MOV.S.S        r24, r11
00000114  4267 0040 0150           DEC.S          r8, r26                            ; ., z
00000120  5343 0044 0160           MOV.S.S        r9, r28                            ; ., x
00000124  5343 0050 0154           MOV.S.S        r10, r27                           ; ., y
00000130  4177 3676 7753           CALL           (fp)-2*4, tak                      ; ., 4
00000134  5343 0050 0054           MOV.S.S        r10, r11
00000140  5343 0040 0144           MOV.S.S        r8, r25
00000144  5343 0044 0140           MOV.S.S        r9, r24
00000150  4177 3676 7747           CALL           (fp)-2*4, tak                      ; ., 4
00000154                   L66:
00000154                   L58:
00000154  5414 0140 3671           MOVMS.5        r24, (fp)-7*4
00000160  6173 3677 3671           UNCALL         (fp)-1*4, (fp)-7*4
                           ;;;     END BLOCK
                                   .END

∂07-Jan-82  1311	RPG  
xcon.reti@dec-marlboro for benchmarks on Franz, InterLisp, NIL.

∂13-Jan-82  1015	Kalman Reti <XCON.RETI at DEC-MARLBORO> 	Re: Benchmarks     
Date: 13 Jan 1982 1129-EST
From: Kalman Reti <XCON.RETI at DEC-MARLBORO>
To: RPG at SU-AI
Subject: Re: Benchmarks    
Message-ID: <"MS5(2020)+GLXLIB1(1056)" 11791941549.33.300.4495 at DEC-MARLBORO>
Regarding: Message from Dick Gabriel <RPG at SU-AI>
              of 7-Jan-82 1852-EST

I am in the process of doing the benchmarks; JONL was referring to some VERY
preliminary info.  I expect to write a report on my results and will include you
in its distribution.  I expect to be done within about a week, but my
schedule is very hectic and I can't guarantee it.
   --------

∂29-Jan-82  2149	Kim.fateman at Berkeley 	Okay, you hackers   
Date: 29 Jan 1982 20:31:23-PST
From: Kim.fateman at Berkeley
To: guy.steele@cmu-10a
Subject: Okay, you hackers
Cc: common-lisp@SU-AI

I think that when GJC says that NIL/Macsyma runs the "X" demo, it
is kind of like the dog that plays checkers.  It is
remarkable, not for how well it plays, but for the fact that it plays at all.

(And I believe it is creditable [if] NIL runs Macsyma at all... I
know how hard it is, so don't get me wrong..)
Anyway, the standard timings we have had in the past, updated somewhat:

MC-Macsyma, Vaxima and Lisp Machine timings for DEMO files
(fg genral, fg rats, gen demo, begin demo)
(garbage collection times excluded.)  An earlier version of this
table was prepared and distributed in April, 1980.  The only
column I have changed is the 2nd one.

MC Time	     VAXIMA    	128K lispm     192K lispm       256K lispm
4.119	   11.8   sec.  43.333 sec.     19.183 sec.    16.483 sec.  
2.639	    8.55  sec.  55.916 sec.     16.416 sec.    13.950 sec. 
3.141	   14.3   sec. 231.516 sec.     94.933 sec.    58.166 sec.  
4.251	   13.1   sec. 306.350 sec.    125.666 sec.    90.716 sec. 


(Berkeley VAX 11/780 UNIX (Kim) Jan 29, 1982,  KL-10 MIT-MC ITS April 9, 1980.)
Kim has no FPA, and 2.5meg of memory.  Actually, 2 of these times are
slower than in 1980, 2 are faster. 

Of course, GJC could run these at MIT on his Franz/Vaxima/Unix system, and
then bring up his NIL/VMS system and time them again.

∂19-Feb-82  1603	Richard J. Fateman <RJF at MIT-MC> 
Date: 19 February 1982 11:46-EST
From: Richard J. Fateman <RJF at MIT-MC>
To: GJC at MIT-MC
cc: RZ at MIT-MC, GLS at MIT-MC, rpg at SU-AI

I ran some timings on interlisp VAX.  It is several times slower
than Franz, but judging from your very limited data points,
several times faster than NIL.
Of course all could, in principle, be tuned, but I think it
suggests that with regard to Franz and Interlisp, there are no big surprises.
Moving from the 10 to the VAX, and from maclisp to Franz, there is
no  real magic.

  The interlisp environment is huge, and this is a liability, I think,
for many sites.

∂19-Feb-82  1629	George J. Carrette <GJC at MIT-MC> 
Date: 19 February 1982 13:06-EST
From: George J. Carrette <GJC at MIT-MC>
To: RJF at MIT-MC
cc: GLS at MIT-MC, RZ at MIT-MC, GJC at MIT-MC, rpg at SU-AI

The figure I gave for Macsyma timing in NIL is no datapoint for
lisp performance comparison. Be my guest if you want to say that
Franz is several times faster than interlisp, and interlisp is
several times faster than NIL, implying that Franz is several times
several times faster than NIL. But don't you dare blame me
when people find out that this is not the case.

What is going on is a good deal more complicated than your model.
Among other things, I compiled Macsyma with full CAR/CDR error
checking in compiled code. Such a thing is unheard of in any lisp
on conventional hardware. Another thing which is unheard of is
to bring up a program like Macsyma, in a new lisp, the first
program to be brought up in this lisp, in a mere two man-days.
[Well, 43 hours continuous hacking anyway.]

I feel I am wasting my breath talking about benchmarks of
programmer productivity to a man who distributes a lisp (Franz)
where "two-many-close-parens" generates the error message
"readlist error, code 3."

Lets talk about Lispm timing figures. You are still touting about
severely incorrect figures for Lispm Macsyma, even though I have
made revised and accurate figures available. 

-gjc


∂26-Feb-82  2006	Howard I. Cannon <HIC at SCRC-TENEX at MIT-AI> 	(TAK 18. 12. 6.) 
Date: Friday, 26 February 1982, 23:02-EST
From: Howard I. Cannon <HIC at SCRC-TENEX at MIT-AI>
Subject: (TAK 18. 12. 6.)
To: rpg at SU-AI

I saw this flying around on MC, and decided to try it at Symbolics.  The
time I get is 2.91 seconds per call, which is an average time over 21.
calls using Moon's LMTIME package.  This contrasts with the published
time of 3.1 seconds per call.  Perhaps the people who did the other timings
did not do a WITHOUT-INTERRUPTS?  I got an average time of 2.82 seconds
per call for TAKF (also 21. calls).

Those timings did not do a WITHOUT-INTERRUPTS, and I'm glad that someone
who knew what was happening tried it on a LM. I look forward to
seeing what the L machine does. Can you  send me a pointer to the code
for LMTIME?
			-rpg-
∂27-Feb-82  1152	Howard I. Cannon <HIC at MIT-MC> 	(TAK 18. 12. 6.)     
Date: 27 February 1982 02:20-EST
From: Howard I. Cannon <HIC at MIT-MC>
Sender: HIC0 at MIT-MC
Subject:  (TAK 18. 12. 6.)  
To: RPG at SU-AI

AI:MOON;LMTIME has a reasonable version of that file.  If you really want to use
one, I can get the latest one from SCRC and put it at MIT somewhere.

The way I did the timings was by defining a function
(defun foo (x y z)
  (tak x y z))

and timed FOO.  I guess I forgot to look whether TAK actually calls itself.

∂26-Feb-82  1756	Masinter at PARC-MAXC 	some interesting old numbers    
Date: 26 Feb 1982 17:55 PST
From: Masinter at PARC-MAXC
Subject: some interesting old numbers
To: RPG@SU-AI

---------------------------
Date: 30 SEP 1975 1138-PDT
From: BOBROW
Subject: Where LISP spends its time (From Rusty Bobrow@bbna)
To:   TEITELMAN, winograd, kaplan, kay, fiala, moore, masinter,
To:   deutsch, lampson, bobrow, WILLIE-SUE

Rusty has made a series of measurements which confirm most of
our suspicions about where LISP spends its time:
He says 80 percent of the total time in LISP is spent
in the machine code portion of the system. 60-70% of the
total is spent in about 300 words of that code.  Herewith an analysis of 
that 60-70% for 4 programs.  
FC=Function Call and return time
BFV= Block free variable lookup
FV= Ordinary free variable lookup
TC=Typechecking
CONS= Time for doing the cons (NOT in GC)
EVAL= Time in the interpreter
Unbox= Time in unboxing numbers

SP= Speech Parsing System at BBN
DWIM= Teitelmans DWIM program
COM= Lisp compiler
GCOM=Burton's function oriented parser (compiled from a grammar)

	FC	BFV	FV	TC	CONS	EVAL	Unbox
SP	11		45	8	2	4
DWIM	8	7	11	13	1		7
COM	10	7.5	23	13	7.5
GCOM	27	21		5	5.5

Note that free variable lookup took from a minimum of 18% for
DWIM (the sum of BFV and FV) to 45% for SP.
Shallow binding which is currently being worked on 
will cut that time to zero, at the expense of some more time
in function calls.

The other figures Rusty has are working set.  He says the smallest he's found
is 80 pages minimum, and usually between 100 to 150 pages.
This bodes less well for ALTO LISP than we would like.
He is doing further experiments and promises to send on the results.
danny

------------------------------------------------------------

∂03-Mar-82  1043	George J. Carrette <GJC at MIT-MC> 
Date: 3 March 1982 13:27-EST
From: George J. Carrette <GJC at MIT-MC>
To: RPG at SU-AI

In looking over the TAK timings in various lisps you sent, I couldn't find
the expected timing for maclisp with a simple fixnum declaration.
Maybe that is what "bummed" maclisp is meant to refer to, but
"bummed" is a rather unfair term to use when for example "PSL" uses
terms like "SYSLISP" and where the fixnum declaration is a rather easy
and transparent thing for the average user to use, and where using
the SYSLISP compilation mode has some other non-transparent ramifications.

Anyway, the time is 0.677 seconds for TAK, and 0.789 for TAKF. The
0.789 for TAKF is very important since it compares with the awful 5.9
second timing without the FIXNUM declaration or SUBRCALL dispatch.  I
happen to use SUBRCALL in Maclisp quite a bit, and almost never use
FUNCALL.  SUBRCALL can be used in interpreted code since TRAMPOLINES
can be consed up on-the-fly to jump to the interpreter. This technique
is used in NIL, which is one reason why the NIL timings for FUNCALL
are so good. It is fair to call use of SUBRCALL "bumming," as very
few people use it, besides myself I can only think of a few, GJS, GLS,
RWK, RLB, GSB. And GSB is the only other person I've seen using
consed-up-on-the-fly trampolines in Maclisp. (Although this technique
is used in maclisp system code written in MIDAS, e.g. SORT).


Here is the code for the MacLisp and `bummed' MacLisp referred to. Keep
in mind that SAIL is a KL, which is 80% of a 2060 (MC is the model B cpu,
more or less, and so is a 2060 in speed). I believe that the SAIL MacLisp
time is consistent with that general performance improvement. SCORE and MC
were down when I did the timings that day.

;;; TAK benchmark: straight triply-recursive Takeuchi function.
(defun tak (x y z)
       (cond ((not (< y x))	;x≤y
	      z)
	     (t (tak (tak (1- x) y z)
		     (tak (1- y) z x)
		     (tak (1- z) x y)))))

;;; TRTAK: the outermost self-call of TAK rewritten as a PROG loop
;;; (hand tail-recursion removal via the TAK prog-tag).  The three
;;; inner calls still invoke the plain recursive TAK above.
(defun trtak (x y z)
       (prog ()
	     tak
	     (cond ((not (< y x))
		    (return z))
		   (t (let ((a (tak (1- x) y z))
			    (b (tak (1- y) z x)))
			   (setq z (tak (1- z) x y))
			   (setq x a y b)(go tak))))))

;;;  So-called `bummed MacLisp' which reflect the assembly language bums used.

;;; BTAK: `bummed' TAK.  The initial base case is tested once, then the
;;; loop at TAK2 open-codes each inner call: (1- arg) is computed once
;;; into the local C and the base case is checked inline, so BTAK2 is
;;; entered only when a real recursive call is needed.
(defun btak (x y z)
 (prog ()
       (cond ((not (< y x))
	      (return z)))
	     
       tak2
       (let ((a (let ((c (1- x)))
		     (cond ((not (< y c)) z)
			   (t (btak2 c y z)))))
	     (b (let ((c (1- y)))
		     (cond ((not (< z c)) x)
			   (t (btak2 c z x)))))
	     (c (let ((c (1- z)))
		     (cond ((not (< x c)) y)
			   (t (btak2 c x y))))))
	    (cond ((not (< b a)) (return c))
		  (t (setq x a
			   y b
			   z c)
		     (go tak2))))))

;;; BTAK2: same loop body as BTAK but without the entry base-case test
;;; (callers have already established (< y x)).
(defun btak2 (x y z)
 (prog ()
       tak2
       (let ((a (let ((c (1- x)))
		     (cond ((not (< y c)) z)
			   (t (btak2 c y z)))))
	     (b (let ((c (1- y)))
		     (cond ((not (< z c)) x)
			   (t (btak2 c z x)))))
	     (c (let ((c (1- z)))
		     (cond ((not (< x c)) y)
			   (t (btak2 c x y))))))
	    (cond ((not (< b a)) (return c))
		  (t (setq x a
			   y b
			   z c)
		     (go tak2))))))

;;; Timing harness: records RUNTIME and (STATUS GCTIME) (microsecond
;;; counters in MacLisp), runs (TAK 18. 12. 6.), then prints non-GC
;;; runtime and GC time, each converted to seconds.  X is an unused
;;; lambda slot filled with ().
(defun timit ()
 ((lambda (t1 x gt)
	(tak 18. 12. 6.)
	  (setq t1 (- (runtime) t1))
	  (setq gt (- (status gctime) gt))
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 1000000.)))
	  (print (list 'gctime
		       (quotient (float gt) 1000000.))))
  (runtime) ()(status gctime)))

;;; NOTE(review): verbatim duplicate of TIMIT above, as it appeared in
;;; the original message.
(defun timit ()
 ((lambda (t1 x gt)
	(tak 18. 12. 6.)
	  (setq t1 (- (runtime) t1))
	  (setq gt (- (status gctime) gt))
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 1000000.)))
	  (print (list 'gctime
		       (quotient (float gt) 1000000.))))
  (runtime) ()(status gctime)))

;;; Same harness, timing TRTAK.
(defun trimit ()
 ((lambda (t1 x gt)
	(trtak 18. 12. 6.)
	  (setq t1 (- (runtime) t1))
	  (setq gt (- (status gctime) gt))
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 1000000.)))
	  (print (list 'gctime
		       (quotient (float gt) 1000000.))))
  (runtime) ()(status gctime)))


;;; Same harness, timing BTAK.
(defun btimit ()
 ((lambda (t1 x gt)
	(btak 18. 12. 6.)
	  (setq t1 (- (runtime) t1))
	  (setq gt (- (status gctime) gt))
	  (print (list 'runtime
		       (QUOTIENT (FLOAT  (- t1 gt))
				 1000000.)))
	  (print (list 'gctime
		       (quotient (float gt) 1000000.))))
  (runtime) ()(status gctime)))

∂23-Apr-82  2308	RPG  	On the air again   
To:   lisptranslators at SU-AI   
I am sending out benchmarks again and hope to get most of this
out of the way by June. Larry Masinter and I have written a paper
on the evaluation and timing of Lisp systems for the Lisp conference
this August in Pittsburgh, so we have not been inactive. I have, oddly
enough, timings for TAK (that famous function) on many machines and
many languages, and I will send out those timings as a teaser later on.

I will be sending out the Berkeley FRPOLY benchmark again soon, because
I do not have all the results, so even if you do it all again that
will be helpful.

In addition, I will have about 3 or 4 more out in quick succession,
including the Forest Baskett Puzzle benchmark.

I guess I want to concentrate on the major Lisp dialects and machines
to cut down on useless work. If you are a major Lisp dialect and 
think I don't realize that, let me know.
			-rpg-

∂25-Apr-82  1340	RPG  	FRANZ Benchmark (called FRPOLY)   
To:   lisptranslators at SU-AI   
This is a repeat of one I sent out earlier, but I have not gotten
it from all, or even many sites. Please try it again:

Here, below, is the benchmark from Berkeley. It is in roughly
MacLisp syntax, but let me point out a few things about it.

First, DEFMACRO and the ` (backquote) syntax. DEFMACRO is
a mechanism for defining macros in MacLisp in which the form
is broken into named arguments, unlike standard MacLisp macros
which have exactly 1 argument which is the macro form itself (EQly
that form). The backquote syntax takes a form and produces code
to generate that form. An example helps here:

	`(atom ,e) turns into (list 'atom e)
	`(signp e ,x) is (list 'signp 'e x)

Thus, , (comma) is the unquoting character.
For example, then, occurrences of (pcoefp x) in the code
below turn into (atom x) by the action of the macro
pcoefp. DEFMACRO provides a form which is substituted for
the calling form with arguments bound in the obvious manner.
Here is the equivalent standard MacLisp macro definition of
pcoefp:

	(defun pcoefp macro (x)
	       (list 'atom (cadr x)))

To run this benchmark interpretively, I suggest expanding the
macros once, either at read time or at first runtime. For those
who need it I can provide this file with macros expanded.

Another hack for defining these macros so that they are expanded
once only is:

(defun pcoefp macro (x)
  ((lambda (form)
    (rplaca x (car form))
    (rplacd x (cdr form))
    form)		   ;value of RPLACD assumed to be undefined
   (list 'atom (cadr x))))

LOCALF seems to be a declaration of LOCAL function names. For MacLisp
I've commented this out. SPECIAL means that there is a global
value cell and that binding is dynamic on that cell.

Here is what SIGNP does:

2) SIGNP IS NOW A FSUBR.  THE FIRST ITEM IN THE ARGLIST IS AN
INDICATOR FOR COMPARISON TO ZERO, E.G., (SIGNP LE N) IS NON-NIL
IF AND ONLY IF THE VALUE OF N IS A NUMBER LESS THAN OR EQUAL TO 
ZERO [SIGNP DOES NOT REQUIRE N TO BE OF NUMBER TYPE].  THE
INDICATORS FOLLOW THE PDP-10 ARITHMETIC COMPARISON INSTRUCTIONS, AND
SHOULD BE SELF EXPLANATORY:  L E LE GE N G 
[E means zerop, N means not zerop.]

(RUNTIME) and (STATUS GCTIME) return the number of microseconds of
total runtime and gctime. Note that gctime is included in
runtime in MacLisp.

There is a difference between `+' and `PLUS' in Franz, which is
that + takes 2 arguments, both fixnums (machine integers) and returns
a fixnum as its result. PLUS takes any number of any type of number and
returns the most appropriate type number. In the tests below, one of them
is designed to overflow the VAX machine integer range and drift into
BIGNUMs, which are any integer larger than the architecture supports. In MacLisp
and FRANZ there is a BIGNUM package that allows one to have contiguous
words of memory represent one number. So, beware of where there are +'s and
PLUS's. The same is true for - and DIFFERENCE, * and TIMES, / and QUOTIENT,
> and GREATERP, < and LESSP, etc. Generic arithmetic is closed compiled
while specific type is open coded.

(ODPP x) tests if X is odd.

= is numeric EQUAL.

PDIFFER1 is mentioned but not defined; is not called for these tests, however.

Here's my transcript of SAIL MacLisp:

(setup)
(Z 1 1.0 0 (Y 1 1.0 0 (X 1 1.0 0 1.0))) 
(bench 2)
(POWER= 2 (0.017 0.0) (0.017 0.0) (0.016 0.0)) 
(bench 5)
(POWER= 5 (0.116 0.0) (1.334 1.084) (0.15 0.0)) 
(bench 10)
(POWER= 10 (2.534 1.8) (19.733 17.151) (8.983 7.901)) 
(bench 15)
(POWER= 15 (16.65 8.832) (112.516 89.298) (63.9 56.749)) 

Which I ran compiled. Times are in seconds.

The following is the benchmark. 
			-rpg-


;;;; Benchmark Commences:

;;; Franz Lisp benchmark from Fateman
;; test from Berkeley based on polynomial arithmetic.

;;; Special (dynamically scoped) variables shared by the FRPOLY routines;
;;; R, R2, R3 and START/RES1-3 are set by SETUP and BENCH below.
(declare (special ans coef f inc i k qq ss v *x*
		    *alpha *a* *b* *chk *l *p q* u* *var *y*
		    r r2 r3 start res1 res2 res3))
;;; LOCALF: Franz-specific declaration of local function names.
(declare (localf pcoefadd pcplus pcplus1 pplus ptimes ptimes1
		 ptimes2 ptimes3 psimp pctimes pctimes1
		 pplus1))
;; Franz uses maclisp hackery here; you can rewrite lots of ways.
;;; Variable ordering: compare the ORDER properties assigned by SETUP.
(defmacro pointergp (x y) `(> (get ,x 'order)(get ,y 'order)))

;;; Representation: a coefficient is any atom; a polynomial is
;;; (var exp coef exp coef ...) -- see SETUP, e.g. (x 1 1 0 1) = x + 1.
(defmacro pcoefp (e) `(atom ,e))
(defmacro pzerop (x) `(signp e ,x))			;true for 0 or 0.0
(defmacro pzero () 0)
;;; Coefficient arithmetic uses generic PLUS/TIMES (handles bignums/flonums).
(defmacro cplus (x y) `(plus ,x ,y))
(defmacro ctimes (x y) `(times ,x ,y))


;;; Cons exponent E and coefficient C onto term list X, dropping zero terms.
(defun pcoefadd (e c x) (cond ((pzerop c) x)
			      (t (cons e (cons c x)))))

;;; Add coefficient C to polynomial P.
(defun pcplus (c p) (cond ((pcoefp p) (cplus p c))
			  (t (psimp (car p) (pcplus1 c (cdr p))))))

;;; Add coefficient C into term list X (folds C into the exponent-0 term,
;;; creating one at the end if necessary).
(defun pcplus1 (c x)
       (cond ((null x)
	      (cond ((pzerop c) nil) (t (cons 0 (cons c nil)))))
	     ((pzerop (car x)) (pcoefadd 0 (pplus c (cadr x)) nil))
	     (t (cons (car x) (cons (cadr x) (pcplus1 c (cddr x)))))))
	 
;;; Multiply polynomial P by coefficient C.
(defun pctimes (c p) (cond ((pcoefp p) (ctimes c p))
			   (t (psimp (car p) (pctimes1 c (cdr p))))))

;;; Multiply every coefficient in term list X by C.
(defun pctimes1 (c x)
       (cond ((null x) nil)
	     (t (pcoefadd (car x)
			  (ptimes c (cadr x))
			  (pctimes1 c (cddr x))))))

;;; Polynomial addition: dispatch on whether either argument is a bare
;;; coefficient, then on main-variable identity/order (POINTERGP).
(defun pplus (x y) (cond ((pcoefp x) (pcplus x y))
			 ((pcoefp y) (pcplus y x))
			 ((eq (car x) (car y))
			  (psimp (car x) (pplus1 (cdr y) (cdr x))))
			 ((pointergp (car x) (car y))
			  (psimp (car x) (pcplus1 y (cdr x))))
			 (t (psimp (car y) (pcplus1 x (cdr y))))))

;;; Merge two term lists kept in decreasing exponent order, adding
;;; coefficients of equal exponents.
(defun pplus1 (x y)
       (cond ((null x) y)
	     ((null y) x)
	     ((= (car x) (car y))
	      (pcoefadd (car x)
			(pplus (cadr x) (cadr y))
			(pplus1 (cddr x) (cddr y))))
	     ((> (car x) (car y))
	      (cons (car x) (cons (cadr x) (pplus1 (cddr x) y))))
	     (t (cons (car y) (cons (cadr y) (pplus1 x (cddr y)))))))

;;; Normalize a result: empty term list -> 0, atom passes through,
;;; a lone exponent-0 term collapses to its coefficient.
(defun psimp (var x)
       (cond ((null x) 0)
	     ((atom x) x)
	     ((zerop (car x)) (cadr x))
	      (t (cons var x))))

;;; Polynomial multiplication; same dispatch scheme as PPLUS.
(defun ptimes (x y) (cond ((or (pzerop x) (pzerop y)) (pzero))
			  ((pcoefp x) (pctimes x y))
			  ((pcoefp y) (pctimes y x))
			  ((eq (car x) (car y))
			   (psimp (car x) (ptimes1 (cdr x) (cdr y))))
			  ((pointergp (car x) (car y))
			   (psimp (car x) (pctimes1 y (cdr x))))
			  (t (psimp (car y) (pctimes1 x (cdr y))))))

;;; Multiply term lists *X* and Y.  Communicates with PTIMES2/PTIMES3
;;; through the specials *X* (current multiplicand terms), U* (result
;;; being accumulated) and V (scan pointer into U*).
(defun ptimes1 (*x* y) (prog (u* v)
			       (setq v (setq u* (ptimes2 y)))
			  a    (setq *x* (cddr *x*))
			       (cond ((null *x*) (return u*)))
			       (ptimes3 y)
			       (go a)))

;;; Multiply the leading term of *X* by every term of Y (fresh list).
(defun ptimes2 (y) (cond ((null y) nil)
			 (t (pcoefadd (plus (car *x*) (car y))
				      (ptimes (cadr *x*) (cadr y))
				      (ptimes2 (cddr y))))))

;;; Multiply the current leading term of *X* by Y, destructively merging
;;; each product term into the accumulated result U* via RPLACA/RPLACD.
;;; NOTE(review): calls PDIFFER1 (referenced but not defined in this
;;; file) only when a merged leading coefficient cancels to zero.
(defun ptimes3 (y) 
  (prog (e u c) 
     a1 (cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))
	(setq c (ptimes (cadr y) (cadr *x*) ))
	(cond ((pzerop c) (setq y (cddr y)) (go a1))
	      ((or (null v) (> e (car v)))
	       (setq u* (setq v (pplus1 u* (list e c))))
	       (setq y (cddr y)) (go a1))
	      ((= e (car v))
	       (setq c (pplus c (cadr v)))
	       (cond ((pzerop c) (setq u* (setq v (pdiffer1 u* (list (car v) (cadr v))))))
		     (t (rplaca (cdr v) c)))
	       (setq y (cddr y))
	       (go a1)))
     a  (cond ((and (cddr v) (> (caddr v) e)) (setq v (cddr v)) (go a)))
	(setq u (cdr v))
     b  (cond ((or (null (cdr u)) (< (cadr u) e))
	       (rplacd u (cons e (cons c (cdr u)))) (go e)))
	(cond ((pzerop (setq c (pplus (caddr u) c))) (rplacd u (cdddr u)) (go d))
	      (t (rplaca (cddr u) c)))
     e  (setq u (cddr u))
     d  (setq y (cddr y))
	(cond ((null y) (return nil)))
	(setq e (+ (car *x*) (car y)))
	(setq c (ptimes (cadr y) (cadr *x*)))
     c  (cond ((and (cdr u) (> (cadr u) e)) (setq u (cddr u)) (go c)))
	(go b))) 

;;; Raise polynomial P to integer power N by repeated squaring.
(defun pexptsq (p n)
	(do ((n (quotient n 2) (quotient n 2))
	     (s (cond ((oddp n) p) (t 1))))
	    ((zerop n) s)
	    (setq p (ptimes p p))
	    (and (oddp n) (setq s (ptimes s p))) ))

;;; Build the test inputs: assign variable order x < y < z, then
;;; r  = x+y+z+1, r2 = 100000*r (drives the power computation into
;;; bignums), r3 = r with floating-point coefficients.
(defun setup nil
  (putprop 'x 1 'order)
  (putprop 'y 2 'order)
  (putprop 'z 3 'order)
  (setq r (pplus '(x 1 1 0 1) (pplus '(y 1 1) '(z 1 1)))) ; r= x+y+z+1
  (setq r2 (ptimes r 100000)) ;r2 = 100000*r
  (setq r3 (ptimes r 1.0)); r3 = r with floating point coefficients
  )
; time various computations of powers of polynomials, not counting
;printing but including gc time ; provide account of g.c. time.

; The following function uses (ptime) for process-time and is thus
;  Franz-specific.

;;; Time stamp: a list (runtime gctime).  The Franz original used
;;; (ptime); this version reads MacLisp's RUNTIME and (STATUS GCTIME),
;;; which count microseconds (B1 below divides by 1000000.0).
(defmacro ptime () '`(,(runtime) ,(status gctime)))

;;; Time (pexptsq r n), (pexptsq r2 n) and (pexptsq r3 n), reporting
;;; a (runtime gctime) pair in seconds for each.
(defun bench (n)
  (setq start (ptime)) ;  Franz ticks, 60 per sec, 2nd number is GC
  (pexptsq r n) 
  (setq res1 (ptime))
  (pexptsq r2 n)
  (setq res2 (ptime))
  ; this one requires bignums.
  (pexptsq r3 n)
  (setq res3 (ptime))
  (list 'power=  n (b1 start res1)(b1 res1 res2)(b1 res2 res3)))
;;; Difference two time stamps element-wise, microseconds -> seconds.
(defun b1(x y)(mapcar '(lambda(r s)(quotient (float (- s r)) 1000000.0)) x y))

;instructions:
;  after loading, type (setup)
; then (bench 2) ; this should be pretty fast.
; then (bench 5)
; then (bench 10)
; then (bench 15)
;... 

∂25-Apr-82  1349	RPG  	Lisps I want to see
To:   lisptranslators at SU-AI   
	Here are the Lisps I want to see benchmarked:

	MacLisp on ITS, TOPS-20, TOPS-10/WAITS, F2 (?)
	InterLisp on KL-10, TOPS-20, TENEX (?), Dolphin, Jericho, Symbolics 360,
		     Vax 780, Vax 750
	Common Lisp/ZetaLisp on LM-2 (Cadr), 3600
	Common Lisp/SpiceLisp on PERQ, 3600, Vax 780, Vax 750
	Common Lisp/S-1 Lisp on S-1 MArk IIA
	PSL on TOPS-20, Vax 780, Vax 750, Apollo/68000
	Common Lisp/NIL on Vax 780, Vax 750
	Franz Lisp on Vax 780, Vax 750
	UCIlisp on TOPS-20
	ELISP on TOPS-20

			-rpg-

∂25-Apr-82  1400	RPG  	Takeuchi 
To:   lisptranslators at SU-AI   
Here is the TAK benchmark plus the timings so far for the
case (TAK 18. 12. 6.) Also included are the version that GJC provided as an
additional test of FUNCALL technology. Please do at least the straight TAK
version, and possibly the TAKF version. I want to do a FUNCALL test, but
probably I want to remove arithmetic from the measurement.

Please report what your compiler does about tail recursion.

Takeuchi function of various types
tak (18. 12. 6.)

On 11/750 in Franz ordinary arith     19.9   seconds compiled
On 11/780 in Franz with (nfc)(TAKF)   15.8   seconds compiled	(GJC time)
On Dolphin in InterLisp Nov 1981 (tr) 11.195 seconds compiled
On 11/780 in Franz (nfc)	       8.4   seconds compiled	(KIM time)
On 11/780 in Franz (nfc)               8.35  seconds compiled	(GJC time)
On 11/780 in Franz with (ffc)(TAKF)    7.5   seconds compiled	(GJC time)
On 11/750 in PSL, generic arith        7.1   seconds compiled
On MC (KL) in MacLisp (TAKF)	       5.9   seconds compiled	(GJC time)
On Dolphin in InterLisp Jan 1982 (tr)  5.71  seconds compiled
On Vax 11/780 in InterLisp (load = 0)  4.24  seconds compiled
On Foonly F2 in MacLisp 	       4.1   seconds compiled
On Apollo (MC68000) PASCAL	       3.8   seconds		(extra waits?)
On 11/750 in Franz, Fixnum arith       3.6   seconds compiled
On MIT CADR in ZetaLisp		       3.16  seconds compiled	(GJC time)
On MIT CADR in ZetaLisp		       3.1   seconds compiled	(ROD time)
On MIT CADR in ZetaLisp (TAKF)         3.1   seconds compiled	(GJC time)
On Apollo (MC68000) PSL SYSLISP	       2.93  seconds compiled
On 11/780 in NIL (TAKF) 	       2.8   seconds compiled	(GJC time)
On 11/780 in NIL		       2.7   seconds compiled	(GJC time)
On 11/750 in C                         2.4   seconds
On 11/780 in Franz (ffc)	       2.13  seconds compiled	(KIM time)
On 11/780 (Diablo) in Franz (ffc)      2.1   seconds compiled	(VRP time)
On 11/780 in Franz (ffc)	       2.1   seconds compiled	(GJC time)
On 68000 in C			       1.9   second
On Utah-20 in PSL Generic arith	       1.672 seconds compiled
On 11/750 in PSL INUM arith            1.4   seconds compiled
On 11/780 (Diablo) in C  	       1.35  seconds
On 11/780 in Franz (lfc)               1.13  seconds compiled	(KIM time)
On UTAH-20 in Lisp 1.6		       1.1   seconds compiled
On UTAH-20 in PSL Inum arith	       1.077 seconds compiled
On SAIL (KL) in MacLisp	      	        .832 seconds compiled
On SAIL in bummed MacLisp           	.795 seconds compiled
On MC (KL) in MacLisp (TAKF,dcl)        .789 seconds compiled
On 68000 in machine language		.7   seconds
On MC (KL) in MacLisp (dcl)	        .677 seconds compiled
On SAIL in bummed MacLisp (dcl)	    	.616 seconds compiled
On SAIL (KL) in MacLisp	(dcl)	        .564 seconds compiled
On Dorado in InterLisp Jan 1982	(tr)	.53  seconds compiled
On UTAH-20 in SYSLISP arith		.526 seconds compiled
On SAIL in machine language		.255 seconds (wholine)
On SAIL in machine language		.184 seconds (ebox-does not include mem)
On SCORE (2060) in machine language     .162 seconds (ebox)
On S-1 Mark I in machine language       .114 seconds (ebox & ibox)

47707 function calls
max recursion depth is 18
average recursion depth is 15.4

;;; TAK benchmark source for the timing table above.
;;; (Fixed: the listing had one extra trailing close-paren, making it
;;; unbalanced -- compare the correct listing earlier in this file.)
(defun tak (x y z)
       (cond ((not (< y x))
	      z)
	     (t (tak (tak (1- x) y z)
		     (tak (1- y) z x)
		     (tak (1- z) x y)))))

notes:
(tr) means Tail Recursion Removal
(nfc) means `normal function call' in Franz (debugging setting (like (NOUUO t)))
(ffc) means `fast function call' in Franz (non-debugging setting (like (NOUUO ()))
(lfc) means `local function call' in Franz (function call directly to an entry point
					    using knowledge of the internals of the
					    function by the compiler)
(dcl) means heavy MacLisp declarations

;;; Here are the definitions of TAKF as provided by GJC. #-NIL means
;;; except in NIL, #+NIL means for NIL.
;;; TAKF: TAK with the recursion routed through an explicit functional
;;; argument, as a benchmark of FUNCALL overhead.
(defun takf (x y z)
  (takfsub #'takfsub x y z))

#-NIL
(defun takfsub (f x y z)
  (if (not (< y x))
      z
      (funcall f f (funcall f f (1- x) y z)
	       (funcall f f (1- y) z x)
	       (funcall f f (1- z) x y))))

#+NIL
(defun takfsub ((&function f) x y z)
  ;; lexical scoping of function bindings allows this.
  (if (not (< y x))
      z
      (f #'f (f #'f (1- x) y z)
	 (f #'f (1- y) z x)
	 (f #'f (1- z) x y))))

∂26-Apr-82  1421	RPG  	Puzzle Benchmark   
To:   lisptranslators at SU-AI   
Here is a transposition of the Forest Baskett Puzzle Benchmark which
is used to benchmark Algolish languages. I don't know what it
does (mainly because I didn't bother to read the code). I will point out
some of the highlights.

;;; START OF BENCHMARK

;;; These specials are referred to globally, so you might want
;;; to do a GLOBALVARS definition here. PLACE is a function that returns
;;; a fixnum

;;; Puzzle benchmark globals: SIZE+1 board cells, piece types 0..TYPEMAX,
;;; piece classes 0..CLASSMAX, D = cube edge factor used in cell indexing.
(declare (special size classmax typemax d)
	 (fixnum (place fixnum fixnum)
		 size classmax typemax d))

;;; TRUE/FALSE constants used to make the code later look good. The
;;; syntax #.TRUE makes the reader substitute the value of TRUE into
;;; the read stream
(setq true t false ())
(declare (setq true t false ()))

;;; This is for the testing printout, which I will show later

;(defmacro tab () '(tyo 9.))

;;; Here are the values of those globals

(setq size 511.)
(setq classmax 3.)
(setq typemax 12.)
(setq d 8.)

;;; III indexes the piece being defined (DEFINEPIECE); KOUNT counts trials.
(declare (special iii kount)
	 (fixnum iii i j k kount m n))

;;; PIECECOUNT, CLASS, and PIECEMAX are 1-dimensional fixnum arrays;
;;; PUZZLE is a 1-dimensional pointer array and P a 2-dimensional one
;;; (piece-type x cell).

(declare (array* (fixnum piececount 1 class 1 piecemax 1)
		 (notype puzzle 1 p 2)))

;;; MacLisp has 0-based arrays, and we need to go from 1 up to classmax

(array piececount fixnum (1+ classmax))
(array class fixnum (1+ typemax))
(array piecemax fixnum (1+ typemax))
(array puzzle t (1+ size))
(array p t (1+ typemax) (1+ size))

;;; In PASCAL this was:
;;; function fit (i : pieceType; j : position) : boolean;
;;;
;;; label	1;
;;; var	k	:	position;
;;;
;;; begin
;;;	fit := false;
;;;	for k := 0 to pieceMax[i] do
;;;		if p[i,k] then if puzzle[j+k] then goto 1;
;;;	fit := true;
;;; 1:
;;; end;
;;; Great style, eh?

;;; FIT: true iff piece I can be placed at board position J, i.e. no
;;; occupied cell of the piece (p i k) overlaps an occupied board cell
;;; (puzzle (+ j k)) for k = 0..piecemax[i].
(defun fit (i j)
 (let ((end (piecemax i)))
      (do ((k 0 (1+ k)))
	  ((> k end) #.true)
	  (cond ((p i k)
		 (cond ((puzzle (+ j k))
			(return #.false))))))))

;;; The commented stuff is for the optional printout
;;; (store (puzzle i) <value>) stores <value> into the ith position of
;;; the array PUZZLE.

;;; PLACE: mark the cells of piece I occupied starting at position J,
;;; decrement the piece count for I's class, and return the index of
;;; the first still-empty cell at or after J (0 if none remains).
(defun place (i j)
       (let ((end (piecemax i)))
	    (do ((k 0 (1+ k)))
		((> k end))
		(cond ((p i k) 
		       (store (puzzle (+ j k)) #.true))))
		 (store (piececount (class i)) (- (piececount (class i)) 1))
	    (do ((k j (1+ k)))
		((> k size)

;		 (terpri)
;		 (princ "Puzzle filled") 

		 0)
		(cond ((not (puzzle k))
		       (return k))))))

;;; REMOVE: undo PLACE -- clear piece I's cells at position J and
;;; restore the class piece count.
;;; NOTE(review): this may shadow a system function of the same name
;;; in some Lisps -- confirm for the target dialect.
(defun remove (i j)
       (let ((end (piecemax i)))
	    (do ((k 0 (1+ k)))
		((> k end))
		(cond ((p i k) (store (puzzle (+ j k)) #.false))))
	    (store (piececount (class i)) (+ (piececount (class i)) 1))))

;;; TRIAL: backtracking search.  Try each piece type with remaining
;;; stock at position J; place it, recurse from the next empty cell,
;;; and remove it again on failure.  Increments KOUNT per attempt and
;;; returns #.true on success, #.false when no piece fits.
(defun trial (j)
       (let ((k 0))
	    (do ((i 0 (1+ i)))
		((> i typemax) (setq kount (1+ kount)) 
			       #.false)
		(cond ((not (= (piececount (class i)) 0))
		       (cond ((fit i j)
			      (setq k (place i j))
			      (cond ((or (trial k)
					 (= k 0))

;				     (terpri)
;				     (princ "Piece") (tab)
;				     (princ (+ i 1)) (tab)
;				     (princ "at")(tab)(princ (+ k 1))

				     (setq kount (+ kount 1))
				     (return #.true))
				    (t (remove i j))))))))))

;;; DEFINEPIECE: record the cells of an II x JJ x KK piece into row III
;;; of P (cell index i + d*(j + d*k)), set its class and extent
;;; (PIECEMAX), and advance the global piece index III.
(defun definepiece (iclass ii jj kk)
       (let ((index 0))
	    (do ((i 0 (1+ i)))
		((> i ii))
		(do ((j 0 (1+ j)))
		    ((> j jj))
		    (do ((k 0 (1+ k)))
			((> k kk))
			(setq index  (+ i (* d (+ j (* d k)))))
			(store (p iii index) #.true))))
	    (store (class iii) iclass)
	    (store (piecemax iii) index)
	    (cond ((not (= iii typemax))
		   (setq iii (+ iii 1))))))

;;; This is the initialization and testing function

;;; START: initialize the board (border cells occupied, a 5x5x5 interior
;;; region free), clear P, define the 13 piece types and their counts,
;;; place the first piece, and run the TRIAL search, reporting the
;;; number of trials taken.
(defun start ()
       (do ((m 0 (1+ m)))
	   ((> m size))
	   (store (puzzle m) #.true)) 
       (do ((i 1 (1+ i)))
	   ((> i 5))
	   (do ((j 1 (1+ j)))
	       ((> j 5))
	       (do ((k 1 (1+ k)))
		   ((> k 5))
		   (store (puzzle (+ i (* d (+ j (* d k))))) #.false))))
       (do ((i 0 (1+ i)))
	   ((> i typemax))
	   (do ((m 0 (1+ m)))
	       ((> m size))
	       (store (p i m) #.false)))
       (setq iii 0)
       ;; Class 0: the six orientations of the 1x1x3-ish pieces, etc.
       (definePiece 0 3 1 0)
       (definePiece 0 1 0 3)
       (definePiece 0 0 3 1)
       (definePiece 0 1 3 0)
       (definePiece 0 3 0 1)
       (definePiece 0 0 1 3)

       (definePiece 1 2 0 0)
       (definePiece 1 0 2 0)
       (definePiece 1 0 0 2)

       (definePiece 2 1 1 0)
       (definePiece 2 1 0 1)
       (definePiece 2 0 1 1)

       (definePiece 3 1 1 1)

       ;; How many pieces of each class are available.
       (store (pieceCount 0) 13.)
       (store (pieceCount 1) 3)
       (store (pieceCount 2) 1)
       (store (pieceCount 3) 1)
       (let ((m (+ 1 (* d (+ 1 d))))
	     (n 0)(kount 0))
	    (cond ((fit 0 m) (setq n (place 0 m)))
		  (t (terpri)(princ "Error")))
	    (cond ((trial n) 
		   (terpri)(princ "success in ")(princ kount) (princ " trials")) 
		  (t (terpri)(princ "failure"))) 
	    (terpri)))

;;; Here's how I time it at SAIL
;;; Timing harness for the puzzle: records RUNTIME and (STATUS GCTIME)
;;; (microseconds), runs START, and prints non-GC runtime and GC time
;;; in seconds.  //$ is MacLisp flonum division.
(defun timit ()
       ((lambda (t1 x gt)
		(start)
		(setq t1 (- (runtime) t1))
		(setq gt (- (status gctime) gt))
		(print (list 'runtime
			     (//$ (float  (- t1 gt))
				  1000000.0)))
		(print (list 'gctime
			     (//$ (float gt) 1000000.0))))
	(runtime) ()(status gctime)))

;;; END OF BENCHMARK

Here's what it types out in verbose mode (those commented out lines
put back in) when I do (TIMIT). Use this to debug your version. 

Puzzle filled
Piece	1	at	1
Piece	8	at	354
Piece	7	at	330
Piece	3	at	291
Piece	13	at	278
Piece	12	at	276
Piece	5	at	275
Piece	1	at	267
Piece	1	at	219
Piece	3	at	203
Piece	1	at	202
Piece	1	at	154
Piece	9	at	138
Piece	2	at	110
Piece	2	at	108
Piece	1	at	106
Piece	3	at	90
success in 2005 trials
(RUNTIME 8.736) 
(GCTIME 0.363) 
T 

This is what it types without the printing stuff

success in 2005 trials
(RUNTIME 8.736) 
(GCTIME 0.363) 
T 

Have fun with this one.
			-rpg-

∂26-Feb-82  0942	Griss at UTAH-20 (Martin.Griss) 	PIG2.MSG    
Date: 26 Feb 1982 1038-MST
From: Griss at UTAH-20 (Martin.Griss)
Subject: PIG2.MSG
To: rpg at SU-AI
cc: griss at UTAH-20

                            PSL Interest Group
                             24 February 1982


     Since my last message in December, we have concentrated on a major
improvement of the VAX system, the addition of new modules to VAX and
DEC-20, and have made a serious start on the Apollo (MC68000) version of
PSL.  Please send a message if you wish to be removed from this mailing
LIST, or wish other names to be added.

	Martin L. Griss,
	CS Dept., 3160 MEB,
	University of Utah,
	Salt Lake City, Utah 84112.
	(801)-581-6542

--------------------------------------------------------------------------

VAX:

    We now have the second version of VAX PSL running quite well.  As was
reported in the December, the initial speed of the first VAX PSL seemed
comparable to Franz LISP on our 11/750 under Unix, with some significantly
slower tests.  For this new version (V3, since it is more advanced than V2
PSL on the DEC-20), we have improved the open coded arithmetic and the basic
code generation scheme, and obtain much improved results.

    The major effort was a significant re-write of LAP, the design and
implementation of a fast-loader, an improved LAP-to-Assembly-Code
translator, and CMACRO expander.  In order to ease the task of
bootstrapping PSL, and the more rapid implementation of resident LAP and
fast-loader, it was decided to use a much more tabular, pattern matching
approach. This increases the amount of common code between the different
LAP based modules (resident LAP, LAP-TO-ASM and FASL), as well as between
different machines. Each machine now requires a fairly concise set of
tables and PRINTF formats to describe the LAP-TO-ASM process. The resident
LAP and FASL are also cleaner.

   V3 PSL has a new tagging scheme that gives 28 bit INUMS on the VAX; i.e.
INUMS are now the same as SYSLISP-integers in the previous model, so that
we won't need to use SYSLISP level integers as much. The new V3 is roughly
twice as fast as V2, and is faster than Franz LISP in many tests (see the
timings given below).  V3 now has a binary fast-loader which of course is
some 20 times faster than the loading of LAP. 


DEC-20:

   Most of the DEC-20 effort since December has been directed at the
preliminary manual, minor fixups, and the addition of small modules. V2 PSL
on the DEC-20 has been used for a LISP class this quarter, without major
errors appearing; the manual has been distributed in a limited edition, and
is now undergoing revision for another mailing. V2 PSL is being used by
other groups in the department to develop VLSI and CAGD software that they
will move to the VAX version in the next few weeks. A number of programs
have been moved to the VAX with very few problems.  We expect to move the
EMODE screen editor during the next week. At that point ALL facilities
developed on V2 DEC-20 PSL will be running on V3 PSL. [The BIGNUM package
will run, but we are now rewriting it to use heap allocated INUM vectors].

   Now that the VAX version is stable, we will rebuild the DEC-20 version
to bring it to V3 level. The major effort is recoding the compiler CMACROs
and some support LAP to conform to the new LAP format, changing the garbage
collector to accommodate the new tags, and adapt the Fast-loader.  This
should take a few weeks at most. At the same time, the code will be
prepared for the Extended Addressing DEC-20/60 which we will run under
Version 5 of the TOPS-20 monitor. With our full-word tagged item, we find
that V2 PSL can not simultaneously support all of the interesting modules
that we would like (EMODE screen editor interface, Lisp Graphics package,
and the REDUCE algebra system) and so will need the extra space provided by
the extended 20, or the VAX.

68000:

    Because of the VAX LAP rewrite, we decided to delay slightly on the
start of Apollo PSL, and instead concentrated on small assembly code
experiments, and graphics support. Around mid-January, we captured a
version of the VAX CMACRO and LAP-TO-ASM tables, and began the conversion
effort.  (The VAX was not quite done at that point, but it appeared good
enough). Since then, we have completed and tested most of the 68000
CMACROs, developed some simple support code, and successfully compiled and
run three test files. These include the usual FACTORIAL, simple I/O, list
printer, the TAK function, and a fairly comprehensive test of most aspects
of SYSLISP. A new programmer has just joined the team and we expect to
start moving somewhat faster in the next few weeks. We have received an
Apollo cross-assembler and simulator for the VAX from Brown University, and
expect this to aid in the task of more rapid cross-compilation,
cross-assembly and testing.

--------------------------------------------------------------------------
TIMINGS:

    At the suggestion of Dick Gabriel at Stanford, we collected some
statistics on (TAK 18. 12. 6.) measurements, and the following summarizes
our results (December/January):

DEC-20/60:
	LISP 1.6 generic arith		1.1 seconds
	PSL generic   V2 arith          1.67
	PSL Inum      V2 arith          1.08
	SYSLISP       V2 arith           .526
	C (New Utah PCC Implementation)  .977

VAX 11/750:
	PSL V3, generic  arith          7.1
	PSL V3, Inum arith              1.4     [inum =syslisp on VAX now]
	Franz, generic  arith          19.9
	Franz, Fixnum arith             3.6     [using 1+, 1-, * etc in Franz]
	C                               2.4

Apollo/68000:
	PSL V3 Inum arithmetic          2.9    [no LISP arithmetic yet]
	Apollo PASCAL                   3.9

	[Because the Apollo uses a pair of 68000's to handle virtual
         memory, there appear to be some extra wait states, and these may
         not be the "best" 68000 times. We will do a Wicat timing soon]

--------------------------------------------------------------------------

Some additional reference points collected by Dick Gabriel:
 Dolphin in InterLisp Nov 1981 (tr) 11.195 seconds compiled
 Dolphin in InterLisp Jan 1982 (tr)  5.71  seconds compiled
 Foonly F2 in MacLisp 	             4.1   seconds compiled
 MIT CADR in ZetaLisp   	     3.1   seconds compiled
 11/780 (Diablo) in Franz	     2.1   seconds compiled
 68000 in C			     1.9   second
 11/780 (Diablo) in C  	             1.35  seconds
 SAIL (KL) in MacLisp	              .83  seconds compiled
 SAIL in bummed MacLisp		      .79  seconds compiled
 68000 in machine language	      .7   seconds
 Dorado in InterLisp Jan 1982	(tr)  .53  seconds compiled
 SAIL in machine language             .255 seconds (wholine)
 SAIL in machine language	      .184 seconds (ebox-does not include mem)
 SCORE (2060) in machine language     .162 seconds (ebox)
 S-1 Mark I in machine language       .114 seconds (ebox & ibox)

notes:
(tr) means Tail Recursion Removal

The best MACLISP and machine code times involved open-coded FIXNUM
arithmetic, hand-unfolding of LISP recursion, and hand-register allocation.
Most of this is done automatically in the PSL compiler.

--------------------------------------------------------------------------

Some more detailed PSL and Franz times:

VAX 11/750 Tests      Franz Lisp  	 PSL      Nature of Test
----------------      ----------       -----      -----------------------
EmptyTest 10000		 374		  51      [An INUM or FIXNUM loop]
SlowEmptyTest 10000	3417		1054	  [Generic arith Loop]
ReverseTest 10		 714		1632 (*)  [Dominated by CONS]
LengthTest 100		4607		2329	  [Mostly LIST walking]
ArithmeticTest 10000	7990		1955	  [Factorial 9]
EvalTest 10000		9333	       10013	  [Eval some expression]
tak 18 12 6		3434		1343	  [INUM or FIXNUM]
gtak 18 12 6	       19941		7208	  [Generic arith]
gtstb g0	       25534		4216	  [A loop with FUNCALL]
gtstb g1	       30413		4369      [Another FUNCALL test]

    These are the best Franz Lisp times we could obtain, involving some
fiddling with preallocating LIST and FIXNUM space and (sstatus translink
on) or (sstatus translink t). Timing on OPUS 36, mid-February on UTAH
VAX-11/750 under 4.1 Berkeley Unix.

(*) After redoing PSL CONS to be a direct heap allocator, rather than
calling a more general heap allocator, this time for Reverse can be reduced
to 1.190 seconds. 

-------

∂28-Feb-82  0940	John O'Donnell <Odonnell at YALE> 	LISP benchmark package   
Date:    28-Feb-82 1235-EST
From:    John O'Donnell <Odonnell at YALE>
Subject: LISP benchmark package
To:      Rpg at SU-AI

Hi.  I've been told you've been working on a set of standards by which to 
compare LISP implementations.

As we've been building an implementation of a new LISP dialect (called T,
almost including SCHEME as a proper subset) for the VAX and Apollo, 
I'd be interested to learn more about your ideas as a comparison tool.
-------

∂10-Mar-82  2148	Griss at UTAH-20 (Martin.Griss) 	MACLISP times    
Date: 10 Mar 1982 2244-MST
From: Griss at UTAH-20 (Martin.Griss)
Subject: MACLISP times
To: rpg at SU-AI
cc: griss at UTAH-20

How do you compare MACLISP times on KL (eg MIT-MC) with DEC-20/60.
We have been timing some more odds and ends (driven by Fateman), involving
various loops, etc. Have gathered some 10 tests, and are running on PSL V3 on VAX and
20, and Franz, LISP 1.6 and MACLISP; send a copy to Fateman and Jonl, got some 
real "polish", so hard to decide what is REAL effects. JONL has some overhead
subtraction algorithm (???), seems to give much faster times then when we do it.
Also, has LOTS more declares etc. How do you resolve such issues?
-------

∂16-Mar-82  0614	Griss at UTAH-20 (Martin.Griss) 	Some new tests   
Date: 16 Mar 1982 0714-MST
From: Griss at UTAH-20 (Martin.Griss)
Subject: Some new tests
To: rpg at SU-AI
cc: griss at UTAH-20

We have gathered the following set of tests; any suggestions for additions:
(TestSetup)
(reclaim)
(princ "EmptyTest 10000		")
(princ (TimeEval '(EmptyTest 10000)))
(terpri)
(princ "SlowEmptyTest 10000	")
(princ (TimeEval '(SlowEmptyTest 10000)))
(terpri)
(princ "Cdr1Test 100		")
(princ (TimeEval '(Cdr1Test 100)))
(terpri)
(princ "Cdr2Test 100		")
(princ (TimeEval '(Cdr2Test 100)))
(terpri)
(princ "CddrTest 100		")
(princ (TimeEval '(CddrTest 100)))
(terpri)
(princ "ListOnlyCdrTest1	")
(princ (TimeEval '(ListOnlyCdrTest1)))
(terpri)
(princ "ListOnlyCddrTest1	")
(princ (TimeEval '(ListOnlyCddrTest1)))
(terpri)
(princ "ListOnlyCdrTest2	")
(princ (TimeEval '(ListOnlyCdrTest2)))
(terpri)
(princ "ListOnlyCddrTest2	")
(princ (TimeEval '(ListOnlyCddrTest2)))
(terpri)
(princ "ReverseTest 10		")
(princ (TimeEval '(ReverseTest 10)))
(terpri)
(reclaim)
(princ "MyReverse1Test 10	")
(princ (TimeEval '(MyReverse1Test 10)))
(terpri)
(reclaim)
(princ "MyReverse2Test 10	")
(princ (TimeEval '(MyReverse2Test 10)))
(terpri)
(reclaim)
(princ "LengthTest 100		")
(princ (TimeEval '(LengthTest 100)))
(terpri)
(princ "ArithmeticTest 10000	")
(princ (TimeEval '(ArithmeticTest 10000)))
(terpri)
(princ "EvalTest 10000		")
(princ (TimeEval '(EvalTest 10000)))
(terpri)
(princ "tak 18 12 6		")
(princ (TimeEval '(topleveltak 18 12 6)))
(terpri)
(princ "gtak 18 12 6		")
(princ (TimeEval '(toplevelgtak 18 12 6)))
(terpri)
(princ "gtsta g0		")
(princ (TimeEval '(gtsta 'g0)))
(terpri)
(princ "gtsta g1		")
(princ (TimeEval '(gtsta 'g1)))
(terpri)

and

'(
(sstatus translink t)
(declare (localf tak gtak))
(def de (macro (x) (cons 'defun (cdr x))))
(def igreaterp (macro (x) (cons '> (cdr x))))
(def ilessp (macro (x) (cons '< (cdr x))))
(def iadd1 (macro (x) (cons '1+ (cdr x))))
(def isub1 (macro (x) (cons '1- (cdr x))))
(def itimes2 (macro (x) (cons '* (cdr x))))
(allocate 'fixnum 2000)
(allocate 'list 500)
(setq $gcprint t)
(defun time () (* (car (ptime)) 17))
(defun reclaim () (gc))
)
(de TestSetup ()
(progn
    (setq TestList (PrepareTest 1000))
    (setq TestList2 (PrepareTest 2000))
    (MakeLongList)
    (setq EvalForm '(setq Foo (cadr '(1 2 3))))))

(de MakeLongList ()
(prog (I)
    (setq LongList '(a b c d e f g h i j k l m n o p q r s t u v w x y z))
    (setq I 0)
loop
    (cond ((igreaterp I 5) (return nil)))
    (setq LongList (append LongList LongList))
    (setq I (iadd1 I))
    (go loop)))

(de PrepareTest (n)
   (prog (l i)
      (setq i -1 l nil)
      top
      (cond ((ilessp n i) (return l)))
      (setq i (iadd1 i)
	    l (cons nil l))
      (go top)))

(de Cdr1Test (N)
(prog (I L)
    (setq I -1)
loop
    (setq I (iadd1 I))
    (setq L LongList)
    (cond ((igreaterp I N) (return nil)))
loop1
    (cond ((atom (setq L (cdr L))) (go loop)))
    (go loop1)))

(de Cdr2Test (N)
(prog (I L)
    (setq I -1)
loop
    (setq I (iadd1 I))
    (setq L LongList)
    (cond ((igreaterp I N) (return nil)))
loop1
    (cond ((null (setq L (cdr L))) (go loop)))
    (go loop1)))

(de CddrTest (N)
(prog (I L)
    (setq I -1)
loop
    (setq I (iadd1 I))
    (setq L LongList)
    (cond ((igreaterp I N) (return nil)))
loop1
    (cond ((null (setq L (cddr L))) (go loop)))
    (go loop1)))

(de ListOnlyCdrTest1 ()
   (prog (l1 l2)
      (setq l1 TestList)
      top
      (setq l2 TestList)
      again
      (cond ((null (setq l2 (cdr l2)))
	     (cond ((null (setq l1 (cdr l1)))
		    (return nil))
		   (t (go top))))
	    (t (go again)))))

(de ListOnlyCddrTest1 ()
   (prog (l1 l2)
      (setq l1 TestList2)
      top
      (setq l2 TestList2)
      again
      (cond ((null (setq l2 (cddr l2)))
	     (cond ((null (setq l1 (cddr l1)))
		    (return nil))
		   (t (go top))))
	    (t (go again)))))

(de ListOnlyCdrTest2 ()
   (prog (l1 l2)
      (setq l1 TestList)
      top
      (setq l2 TestList)
      again
      (cond ((atom (setq l2 (cdr l2)))
	     (cond ((atom (setq l1 (cdr l1)))
		    (return nil))
		   (t (go top))))
	    (t (go again)))))

(de ListOnlyCddrTest2 ()
   (prog (l1 l2)
      (setq l1 TestList2)
      top
      (setq l2 TestList2)
      again
      (cond ((atom (setq l2 (cddr l2)))
	     (cond ((atom (setq l1 (cddr l1)))
		    (return nil))
		   (t (go top))))
	    (t (go again)))))

(de EmptyTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (setq I (iadd1 I))
    (go loop)))

(de SlowEmptyTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((greaterp I N) (return nil)))
    (setq I (add1 I))
    (go loop)))

(de ReverseTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (reverse LongList)
    (setq I (iadd1 I))
    (go loop)))

(de MyReverse1Test (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (myreverse1 LongList)
    (setq I (iadd1 I))
    (go loop)))

(de myreverse1 (L)
(prog (M)
loop
    (cond ((atom L) (return M)))
    (setq M (cons (car L) M))
    (setq L (cdr L))
    (go loop)))

(de MyReverse2Test (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (myreverse2 LongList)
    (setq I (iadd1 I))
    (go loop)))

(de myreverse2 (L)
(prog (M)
loop
    (cond ((null L) (return M)))
    (setq M (cons (car L) M))
    (setq L (cdr L))
    (go loop)))

(de LengthTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (length LongList)
    (setq I (iadd1 I))
    (go loop)))

(de Fact (N)
    (cond ((ilessp N 2) 1) (t (itimes2 N (Fact (isub1 N))))))

(de ArithmeticTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (Fact 9)
    (setq I (iadd1 I))
    (go loop)))

(de EvalTest (N)
(prog (I)
    (setq I 0)
loop
    (cond ((igreaterp I N) (return nil)))
    (eval EvalForm)
    (setq I (iadd1 I))
    (go loop)))

(de TimeEval (Form)
(prog (I)
    (setq I (time))
    (eval Form)
    (return (difference (time) I))))

(de topleveltak (x y z) (tak x y z))

(de tak (x y z)
  (cond ((null (ilessp y x))  z)
	(t (tak (tak (isub1 x) y z)
		(tak (isub1 y) z x)
		(tak (isub1 z) x y)))))

(de toplevelgtak (x y z) (gtak x y z))

(de gtak (x y z)
  (cond ((null (lessp y x))  z)
	(t (gtak (gtak (sub1 x) y z)
		(gtak (sub1 y) z x)
		(gtak (sub1 z) x y)))))

(de gtsta (F)
  (prog (I)
    (setq I 1)
Loop
    (cond ((igreaterp I 100000) (return nil)))
    (apply F (list I))
    (setq I (iadd1 I))
    (go Loop)))

(de gtstb (F)
  (prog (I)
    (setq I 1)
Loop
    (cond ((igreaterp I 100000) (return nil)))
    (funcall F I)
    (setq I (iadd1 I))
    (go Loop)))

(de g0 (X) X) 
(de g1 (X) (iadd1 X))

(de nreverse (x)
  (nreconc x nil))

(de nreconc (x y)
 (prog (z)
   L (cond ((atom x) (return y)))
      (setq z x)
      (setq x (cdr x))
      (setq y (rplacd z y))
      (go L)))

(de nnils (N)
  (prog (LST i)
    (setq i 0)
loop
    (cond ((igreaterp i N) (return LST)))
    (setq LST (cons nil LST))
    (setq i (iadd1 i))
    (go loop)))

(de nils (N)
  (setq XX (nnils N))
  N)

(de nr ()
  (setq XX (nreverse XX))
  nil)


M
-------

∂07-Apr-82  1051	Mike Genesereth <CSD.GENESERETH at SU-SCORE> 	machine timings    
Date:  7 Apr 1982 1051-PST
From: Mike Genesereth <CSD.GENESERETH at SU-SCORE>
Subject: machine timings
To: rpg at SU-AI

As you know Fairchild has at least one of everything, and so Harry
Barrow decided to do some comparisons amongst machines.  Here is his
report.

Mail-From: BARROW created at 26-Mar-82 13:24:58
Date: 26 Mar 1982 1324-PST
From: Barrow at FLAIR-20 (Harry Barrow)
Subject: More FFT benchmarks
To: AI-researchers at FLAIR-20
cc: Barrow at FLAIR-20

I have now rewritten the FFT benchmark test in Interlisp and run it
on the 2060 and the Dolphin.   The results follow (and are also in
<barrow.lisp>fft.time ).   I guess that running the interpreter is 
probably a fair test of non-numerical ability, and running the
compiled version is a fair test of floating point and array access...


Timings for Dick Duda's FFT function operating on a 1024 element array of data.


---------------------------------------------------------------
| Machine & |	 Interpreted  |   Compiled	| Interpreted |
| Language  |	Secs	Ratio |	Secs	Ratio	| /Compiled   |
---------------------------------------------------------------
|	    |		      |			|	      |
| 2060	    |	 28.7	1.0   |	 0.532	  1.0	|  53.9	      |
| Maclisp   |		      |			|	      |
|	    |		      |			|	      |
| LispM	    |	 97.2	3.39  |	 3.52	  6.62	|  27.6       |
| Zetalisp  |		      |			|	      |
|	    |		      |			|	      |
| Vax	    |	135.5	4.72  |	66.5	125.0	|   2.04      |
| Franzlisp |		      |			|	      |
|	    |		      |			|	      |
| 2060	    |	 37.3	1.30  | 12.6	 23.68	|   2.96      |
| Interlisp |		      |			|	      |
|	    |		      |			|	      |
| Dolphin   |	431.7	15.0  |	149.1	280.3	|   1.54      |
| Interlisp |		      |			|	      |
|	    |		      |			|	      |
---------------------------------------------------------------
-------

∂24-Apr-82  0010	Howard I. Cannon <HIC at MIT-MC> 	On the air again     
Date: 24 April 1982 03:10-EST
From: Howard I. Cannon <HIC at MIT-MC>
Subject:  On the air again   
To: RPG at SU-AI

I will try to provide reasonable response for the Symbolics LM-2.  We won't
be able to give out 3600 numbers until early July, I think.  Just confirming.

BTW, I'll be out West the first week of May.  Perhaps we should get together.

--Howard

∂24-Apr-82  0611	Martin.Griss <Griss at UTAH-20> 	Re: On the air again       
Date: 24 Apr 1982 0707-MST
From: Martin.Griss <Griss at UTAH-20>
Subject: Re: On the air again   
To: RPG at SU-AI
cc: Griss at UTAH-20
In-Reply-To: Your message of 24-Apr-82 0008-MST

PSL, minor now, hope to become more significant. Have just begun to
do some pre-release distributions. Are you interested in a DEC-20 and/or
VAX copy?
-------

∂24-Apr-82  0756	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: On the air again      
Date: 24 Apr 1982 1051-EST
From: Scott E. Fahlman <FAHLMAN at CMU-20C>
To: RPG at SU-AI
Subject: Re: On the air again   
Message-ID: <820323105153FAHLMAN@CMU-20C>
Regarding: Message from Dick Gabriel <RPG at SU-AI>
              of 24-Apr-82 0208-EST

Dick,

Your note comes at a good time.  As you know, several of us at CMU are
doing a Common Lisp for the VAX, based on Spice Lisp.  The fellow at DEC
who runs their end of the project has been very hot to get some
benchmarks.  Even though we are not yet ready to run them, and won't be
for a month or two, he wants to show his management what a set of Lisp
benchmarks might look like.  His name is Gary Brown.  He may be
contacting you for a few more details of what you plan to do.

Cheers,
Scott
   --------

∂24-Apr-82  0832	MASINTER at PARC-MAXC 	small benchmarks 
Date: 24 APR 1982 0832-PST
From: MASINTER at PARC-MAXC
Subject: small benchmarks
To:   RPG at SU-AI
cc:   masinter

A suggestion for machines with cache: small benchmarks often
don't have typical cache behavior. One simple experiment to try
is to replicate the benchmark function. For example, you can
take TAK, and make 100 different versions, where TAK1 calls
TAK2 TAK3 TAK4 and TAK5, TAK2 calls TAK6 TAK7 and TAK8, etc.
TAK100 can call TAK1 TAK2 ...

This not only eliminates recursion removal, it eliminates the
extreme locality of code. I think it will change the relative rankings
of machines quite a bit.

-------

Some more benchmarks:

DOES YOUR COMPILER DO CONSTANT FOLDING BENCHMARK:

(LET ((X 1000) (Y 1234) (Z 1976))
   (TIMES 3.4 X 193.2 Y 1 2 3 4 5 6 7 8 9 10 11 .01 .03 Z]

do it 10000 times.


DEEP OR SHALLOW BINDING BENCHMARK:

The DEEPBINDERS benchmark has all variables special, but only
occasionally used. For example, an argument TOLERANCE is bound
at every recursive call, but the sub-function only uses it
at the leaves.

The SHALLOWBINDERS benchmark binds variables at one level and
uses them at every level of recursion, including very deep ones.

------

TAK using no arithmetic is also instructive, since it filters
out the different integer representation methods and their
performance advantages. This is simply done by using lists
instead of numbers. (- X 1) -> (CDR X) and GREATERP -> LONGERP.

As the PSL timings show, the TAK times are completely swamped
by the differences between generic and integer arithmetic primitives
in some implementations. Do the translators get to generate
programs which don't complain if given incorrect arguments? (e.g.
(TAK 'YES 'NO 'MAYBE) does it return a result?)

-----------

Size of compiled code: 

Working set is a very important consideration in overall system
performance. None of the "benchmarks" talk about the size of the
code generated. I don't know exactly how this should be measured--
maybe in "total number of bits"?

--------

Elapsed vs. "cpu" time:
The "cpu" time reported in timesharing system often doesn't include
background activity of memory management, etc. These of course don't
show up in tiny benchmarks, either. At the minimum, the times reported
should include elapsed time ...


(more later. Should this go in our paper? I need to go now.)

Larry

∂24-Apr-82  1102	Glenn S. Burke <GSB at MIT-ML> 	Major Dialects, fyi    
Date: 24 April 1982 14:02-EST
From: Glenn S. Burke <GSB at MIT-ML>
Subject: Major Dialects, fyi
To: RPG at SU-AI

I suppose i might be brash enough to consider myself (meaning NIL)
a major dialect.  I am working on it full time now, excepting time
out for putting out fires of other other natures (recently restored
a disk pack from tape on ML after a head crash, using Maclisp).
Presumably george will continue to do timings of things as he has in
the past however.

∂24-Apr-82  1206	Greenberg.Symbolics at MIT-MULTICS 	Re: On the air again    
Date:  24 April 1982 15:07 est
From:  Greenberg.Symbolics at MIT-MULTICS
Subject:  Re: On the air again
To:  Dick Gabriel <RPG at SU-AI>
In-Reply-To:  Msg of 04/24/82 02:08 from Dick Gabriel

Perhaps a statement of which of us you consider sufficiently major
would be of value to all of us.

∂25-Apr-82  1423	Martin.Griss <Griss at UTAH-20> 	Re: Lisps I want to see    
Date: 25 Apr 1982 1515-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: Re: Lisps I want to see
To: RPG at SU-AI
cc: Griss at UTAH-20
In-Reply-To: Your message of 25-Apr-82 1449-MDT

Is it in fact true that all those LISP currently run on all those machines,
or is that just your expectation for this coming period.

No 360/370 lisps mentioned.

How about T from Yale. Do you know what its state is? I can't seem to get a straight
answer from O'Donnell. Does it actually run on VAX, 20 and Apollo...
-------

∂25-Apr-82  1719	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: Lisps I want to see   
Date: 25 Apr 1982 2007-EDT
From: Scott E. Fahlman <FAHLMAN at CMU-20C>
To: RPG at SU-AI
Subject: Re: Lisps I want to see
Message-ID: <820324200748FAHLMAN@CMU-20C>
Regarding: Message from Dick Gabriel <RPG at SU-AI>
              of 25-Apr-82 1649-EDT

Dick,
We will be doing benchmarks on Common Lisp (Spice) for Perq, 3600, Vax 750,
and Vax 780, as these implementations become available.  I'll have to check
with DEC about releasing the Vax numbers before we get all the optimizations
installed, but I will advocate that they allow this, since such information
is clearly of interest to the Lisp community.  Probably we will have Perq
and Vax numbers starting sometime in June.
-- Scott
   --------

∂27-Apr-82  1102	Kim.jkf at Berkeley 	Re: franz tak benchmarks     
Date: 27 Apr 1982 10:59:55-PDT
From: Kim.jkf at Berkeley
To: RPG@SU-AI
Subject: Re: franz tak benchmarks    
In-reply-to: Your message of 26 Apr 1982 1330-PDT

  The local function call fits in the gap between macros and lisp
functions.  Using macros will eliminate function calls at the
expense of space for expanding the macro all of the time.
Using a standard lisp function means that the code for the function
is not duplicated, but now you have to pay the price of going through
the function calling protocol.  In Franz, function calling is made
more expensive due to
  1) our use of the VAX 'calls' instruction, which does more than we need,
	but which is required if we want to call C programs.  PSL and
	NIL also use 'calls'.
  2) the fact that calls must be able to end up in the interpreter if
	the user so desires.   This flexibility costs something in 
	all lisp systems I expect.

In a local function call, we use the quicker 'jsb' instruction which
is very similar to 'pushj' on the 10.  Also, the call is directly to
the subroutine in question, eliminating the cost of the linkages (and
the ability to debug a function).  Currently a local function cannot
be called from the interpreter, but it wouldn't be hard to 
generate an interpreter callable stub which would just call the
real local function.  We've talked about this but so far haven't
been inspired to do anything about it.

 If you are interested, I can mail you a 'systems programmer' manual
for franz lisp, which describes calling sequences and other internal
details.




∂02-Apr-82  0950	Walter van Roggen <VANROGGEN at CMU-20C> 	lisp benchmarks   
Date:  2 Apr 1982 1243-EST
From: Walter van Roggen <VANROGGEN at CMU-20C>
Subject: lisp benchmarks
To: rpg at SU-AI

Scott mentioned that you had gathered together some kind of benchmarks
for Lisp. Could you send me a pointer to them, or better yet, mail
me the files if they are not too large?

Thanks			---Walter
-------

∂26-Apr-82  1222	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	Re: Lisps I want to see
Date: 26 Apr 1982 1519-EDT
From: HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility)
Subject: Re: Lisps I want to see
To: RPG at SU-AI
In-Reply-To: Your message of 25-Apr-82 1649-EDT

How much would it bother you if we don't benchmark the old UCILisp?
We are upgrading Elisp so it has features that make conversion from
Maclisp, etc.  IN particular, records and ` are both there, and
Unwind-protect will be shortly.
-------

Not much, though if you think there is some budding user community out 
there, including a `volunteer' to do the timing then I think we ought
to get it done. I don't see much future for UCILisp nor MacLisp, but the
latter provides a kind of standard to measure against.
			-rpg-


∂28-Apr-82  1316	RPG  	Gross Lossage 
To:   lisptranslators at SU-AI   
As Mabry Tyson correctly points out, what we have been calling TAK
isn't what Takeuchi calls TAK. Here are the 2 versions:

Ours:

(defun tak (x y z)
       (cond ((not (< y x))
	      z)
	     (t (tak (tak (1- x) y z)
		     (tak (1- y) z x)
		     (tak (1- z) x y))))) 
    
----------------------------

His:
(DEFUN TAK (X Y Z)
       (COND ((> X Y)
	      (TAK (TAK (1- X) Y Z) 
		   (TAK (1- Y) Z X)
		   (TAK (1- Z) X Y) ))
	     (T Y) ))

What we have been calling TAK is a function that JMC (John McCarthy) decided
to benchmark on a few machines for a quick comparison. I then decided to
do it extensively and people followed suit. So, there are several choices:

	1. Redo all the timings (I currently have 54) using the correct TAK.
	2. Rename our function JMC and report the findings of that benchmark.

The real TAK goes deeper for various values than our TAK, but both measure
the same thing. 

So, let's hear your vote!

∂28-Apr-82  1456	JonL at PARC-MAXC 	Re: Gross Lossage    
Date: 28 Apr 1982 14:53 PDT
From: JonL at PARC-MAXC
Subject: Re: Gross Lossage
In-reply-to: RPG's message of 28 Apr 1982 1316-PDT
To: Dick Gabriel <RPG at SU-AI>
cc: lisptranslators at SU-AI

Although the facilities tested by the two benchmarks, JMC and true TAK,
are essentially the same, I'd at least like to see the renaming occur, since
the true TAK is more sensitive to ideas like "cacheing" of sub-recursive
results, and PDL depth problems.


∂29-Apr-82  2158	MASINTER at PARC-MAXC 	MAS benchmark    
Date: 29 APR 1982 2158-PDT
From: MASINTER at PARC-MAXC
Subject: MAS benchmark
To:   LispTranslators at SU-AI

I propose the following function as a measurement of machines
function/call CDR performance, independent of small number arithmetic.
I call this function MAS, in honor of Dr. Masinter who invented it:

(FILECREATED "28-APR-82 22:17:28" <MASINTER>MAS.;1 768    

     changes to:  MASCOMS LISTN MAS SHORTERP)


(PRETTYCOMPRINT MASCOMS)

(RPAQQ MASCOMS ((FNS LISTN MAS SHORTERP)))
(DEFINEQ

(LISTN
  [LAMBDA (N)                                   (* lmm "28-APR-82 21:41")
    (COND
      ((ZEROP N)
	NIL)
      (T (CONS N (LISTN (SUB1 N])

(MAS
  [LAMBDA (X Y Z)                               (* lmm "28-APR-82 21:39")
    (COND
      ((NOT (SHORTERP Y X))
	Z)
      (T (MAS (MAS (CDR X)
		   Y Z)
	      (MAS (CDR Y)
		   Z X)
	      (MAS (CDR Z)
		   X Y])

(SHORTERP
  [LAMBDA (X Y)                                 (* lmm "28-APR-82 21:38")
    (AND Y (OR (NULL X)
	       (SHORTERP (CDR X)
			 (CDR Y])
)

Benchmark is called

(MAS (LISTN 18) (LISTN 12) (LISTN 6))

∂29-Apr-82  2244	MASINTER at PARC-MAXC 	non-local TAK    
Date: 29 APR 1982 2243-PDT
From: MASINTER at PARC-MAXC
Subject: non-local TAK
To:   LispTranslators at SU-AI

I tried making a version of TAK where there were 100 different functions,
with unpredictable calling patterns among them, to destroy the code
locality. This should have a performance penalty on machines with caches.
I got performance degradation of 10-20% on the ISIB 2060. I imagine
it would be worse in MacLisp, where the number unbox and call/return
is all inline. I tried some random permutations,
as well as some pseudo-random ones. I didn't spend enough
time understanding the structure of TAK enough to construct
a pessimal example.

(DEFINEQ

(DEFTAKS
  [LAMBDA NIL                                   (* lmm "29-APR-82 22:07")
                                                (* Define TAK0..TAK99: for each I,
						   make a TAK variant whose four
						   calls go to pseudo-randomly
						   chosen TAKn functions, defeating
						   instruction-cache locality.
						   BUG FIX: the original read
						   (ITIMES (ADD1 I 37)) etc., but
						   ADD1 takes one argument; the
						   multiplier belongs to ITIMES,
						   as confirmed in the follow-up
						   message of 4 May 1982.)
    (for I from 0 to 99 join (DTK I (ADD1 I)
				  (ITIMES (ADD1 I) 37)
				  (ITIMES (ADD1 I) 11)
				  (ITIMES (ADD1 I) 17])

(DTK
  [LAMBDA (N A B C D)                           (* lmm "29-APR-82 22:05")
                                                (* Map the five numbers onto TAKn
						   names (mod 100 via PK) and
						   define the N-th variant.)
    (DEFTAK (PK N)
	    (PK A)
	    (PK B)
	    (PK C)
	    (PK D])

(PK
  [LAMBDA (N)                                   (* lmm "29-APR-82 22:06")
                                                (* Pack the symbol TAKn for
						   n = N mod 100.)
    (PACK* (QUOTE TAK)
	   (IREMAINDER N 100])

(DEFTAK
  [LAMBDA (TAK1 TAK2 TAK3 TAK4 TAK5)            (* lmm "29-APR-82 22:02")
                                                (* Define TAK1 as a TAK-shaped
						   function whose outer and three
						   inner recursive calls go to
						   TAK2..TAK5 respectively.)
    (DEFINE (BQUOTE ((, TAK1 (LAMBDA (X Y Z)
			  (COND
			    ((NOT (ILESSP Y X))
			      Z)
			    (T (, TAK2 (, TAK3 (SUB1 X)
					  Y Z)
				  (, TAK4 (SUB1 Y)
				     Z X)
				  (, TAK5 (SUB1 Z)
				     X Y])
)

∂01-May-82  1044	JonL at PARC-MAXC 	Re: Gross LossageD   
Date: 1 May 1982 10:42 PDT
From: JonL at PARC-MAXC
Subject: Re: Gross LossageD
To: Dick Gabriel <RPG at SU-AI>
cc: lisptranslators at SU-AI

One more fine point about the original TAK -- it used generic names for
the arithmetic, so the "test" would include annotating whether the generic 
names had to be converted to type-specific, or whether there is a declarational
facility to do this automatically.  Below is part of my note of Oct 1980 , which
reproduced the letter from Japan containing the Takeuchi function and the
particular "benchmark" call to it called TARAI-4.

Recently, several persons have sent modifications to TAK/JMC to test out
other facilities;  maybe it would be good to have a list of the things
purported to be tested.  As I recollect, they are now
  1) Function-to-function interface
  2) Basic arithmetic capability
  3) Generic versus Type-specific arithmetic
  4) FUNCALL/APPLY* interface
  5) Effect of code locality (penalty to "cached" memory for non-local)
  6) Sensitivity to Stack depth and/or recursion level
Any more?


Date: 19 October 1980 06:09-EDT
From: Jon L White <JONL at MIT-MC>
Subject: Interesting note from Japan
To: LISP-DISCUSSION at MIT-MC

    On September 25, 1980, Mr. Shigeki Goto of the Electical 
Communication Laboratories (Nippon Telegraph and Telephone Co., in 
Tokyo) sent me a note . . . excerpt from his note (between doublequotes):

"    Mr. Nobuyasu Ohsato, one of my colleagues at Musashino
 ECL, has compared the execution speed of various LISP systems.
. . . 
 (*) TARAI-4 is (TAK 4 2 0), where TAK is an interesting function
 defined by Mr. Ikuo Takeuchi.
 (DEFUN TAK (X Y Z)
	(COND ((GREATERP X Y)
	       (TAK (TAK (SUB1 X) Y Z) 
		    (TAK (SUB1 Y) Z X)
		    (TAK (SUB1 Z) X Y) ))
	      (T Y) ))
"
. . . 

∂28-Apr-82  1248	Mabry Tyson <Tyson at SRI-AI> 	TAK function!#"%&$$"&#( 
Date: 28 Apr 1982 1242-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: TAK function!#"%&$$"&#(
To: rpg at SU-AI
cc: pmartin at SRI-AI, stickel at SRI-AI

Your message:

    Date: 25 Apr 1982 1400-PDT
    From: Dick Gabriel <RPG at SU-AI>
    Subject: Takeuchi 
    To:   lisptranslators at SU-AI   
    
    Here is the TAK benchmark plus the timings so far for the
    case (TAK 18. 12. 6.) Also included are the version that GJC provided as an
    additional test of FUNCALL technology. Please do at least the straight TAK
    version, and possibly the TAKF version. I want to do a FUNCALL test, but
    probably I want to remove arithmetic from the measurement.
    
    
    (defun tak (x y z)
           (cond ((not (< y x))
	 	      z)
    		     (t (tak (tak (1- x) y z)
        		     (tak (1- y) z x)
    			     (tak (1- z) x y))))))
    
    
----------------------------

One I got back in 1980 from the SCORE bboard:

    19 OCT 1980 0632-EDT	JONL at MIT-MC (Jon L White)	Interesting note from Japan
    I sent the following note out to the LISP-FORUM at MIT-MC,
    and to a few others, but you didn't seem to be on any of
    those lists, so here's a separate mailing for something
    you may find amusing.
		...
     (*) TARAI-4 is (TAK 4 2 0), where TAK is an interesting function
     defined by Mr. Ikuo Takeuchi.
     (DEFUN TAK (X Y Z)
    	(COND ((GREATERP X Y)
    	       (TAK (TAK (SUB1 X) Y Z) 
    		    (TAK (SUB1 Y) Z X)
    		    (TAK (SUB1 Z) X Y) ))
    	      (T Y) ))

←←←←←←←←←←←←←←←←←←←←←←←

Hey!  You used a different version of TAK!!!  The older version seems
to run almost forever (at least 20 times longer at which point I stopped)
when given those args.  It took a long time for me to figure out that
my old code wasn't at fault!

How about sending out a warning (and explanation?) in case anyone else
tries those args with the old TAK.
-------

∂28-Apr-82  1325	Mabry Tyson <Tyson at SRI-AI> 	Re: Gross Lossage  
Date: 28 Apr 1982 1322-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Re: Gross Lossage 
To: RPG at SU-AI
In-Reply-To: Your message of 28-Apr-82 1316-PDT

Obviously we should stay with your version rather than redo the timings.
-------

∂28-Apr-82  1954	Martin.Griss <Griss at UTAH-20> 	Re: Gross Lossage     
Date: 28 Apr 1982 2052-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: Re: Gross Lossage 
To: RPG at SU-AI
cc: Griss at UTAH-20
In-Reply-To: Your message of 28-Apr-82 1416-MDT

Either is fine by me; JMC will do.
-------

∂28-Apr-82  2209	Scott E. Fahlman <FAHLMAN at CMU-20C> 	Re: Gross Lossage    
Date: 29 Apr 1982 0057-EDT
From: Scott E. Fahlman <FAHLMAN at CMU-20C>
To: RPG at SU-AI
Subject: Re: Gross Lossage 
Message-ID: <820328005738FAHLMAN@CMU-20C>
Regarding: Message from Dick Gabriel <RPG at SU-AI>
              of 28-Apr-82 1616-EDT

I vote to rename the function JMC.  The only reason for re-doing everything
would be if there are lots of datapoints available for the real TAK.  (Sigh!)
-- Scott
   --------

∂29-Apr-82  1232	Mabry Tyson <Tyson at SRI-AI> 	Re: Gross Lossage  
Date: 29 Apr 1982 1227-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Re: Gross Lossage 
To: RPG at SU-AI
In-Reply-To: Your message of 28-Apr-82 1316-PDT

In thinking about it, I would prefer to call the modified TAK something like
TAK2 or TAK'.  Naming it JMC would be like naming a trivial modification of
Ackerman's function JD if John Doe made the modification.  That would be ok if
it weren't going to be published under that name.  Now, if there are some
important differences between the two, that's something different.  But I
suspect that McCarthy simply tried to remember the function and didn't quite
get it right.
-------

There is a minor difference in terms of what this function measures in
that it number conses more distinct numbers, though I can't think on
any implementation that would be affected.

My plan was to call it TAK'.
			-rpg-
∂01-May-82  2326	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	a couple of benchmark results    
Date:  2 May 1982 0222-EDT
From: HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility)
Subject: a couple of benchmark results
To: rpg at SU-AI
cc: josh at RUTGERS, fisCHER at RUTGERS

Here are the first set of results.  Note that the R/UCI Lisp results
could be changed by giving it more free space.  Also, there is a certain
variability in all of these, so about all I would stake much on is that
Elisp and R/UCI Lisp are similar in speed.  Note that R/UCI Lisp was
done with NOUUO NIL, which caused function calls to turn into direct
PUSHJ's, thus making debugging impossible.  In Elisp there is no such
choice.  You can always define functions.  The overhead is using an
indirect PUSHJ instead of a direct PUSHJ.  Also note that in both cases
you are seeing generic arithmetic.  

By the way, it took a fair amount of work to get Elisp to do this well.
I had not bothered to code the arithmetic routines carefully the first
time through, so this project made me get around to making a second pass
through that.  I also cut a few instructions out of function calls, and
I rearranged the distribution of various things in memory.  Before I did
that, in TAKF changing the variable F to FF speeded up the function by a
factor of 5.  That is because F is a builtin variable and FF is not.
The initial data turned out to be in a bad spot.  So I went over the
address space usage and got things so that there are no addressing
conflicts.  (I thought I had done that before, but it turns out I
goofed.)  As far as I know, none of this work is peculiar to these
particular benchmarks.

Note that in BENCH, the middle numbers are missing, as we don't have
BIGNUM's.  All numbers are in seconds.  The ones in () are for GC.

In Elisp, all final calls are turned into jumps.  A tailrecursive
function does in fact turn into a loop.  I think the R/UCI compiler does
the same, but I am not as familiar with it.  (As you may know, the Elisp
compiler is a modified Utah PSL compiler from about a year ago.)

Elisp

(bench 10)   .893 (0)	   1.018 (0)
(bench 15)  4.919 (.809)   4.416 (.221)
(tak 18 12 6)     1.063 (0)
(takf 18 12 6)    2.094 (0)

R/UCI Lisp, NOUUO

(bench 10)   1.137 (.276)    2.658 (1.404)
(bench 15)   7.162 (2.926)  20.208 (14.453)
(tak 18 12 6)     .969 (0)
(takf 18 12 6)   3.157 (0)
-------

∂03-May-82  2016	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	more timing results    
Date:  3 May 1982 2314-EDT
From: HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility)
Subject: more timing results
To: rpg at SU-AI

I have a minor correction on the Puzzle benchmark.  I forgot to set
NOUUO NIL for R/UCI Lisp, to get PUSHJ calls.  So here are the full
numbers.

Elisp:  25.133 sec.
R/UCI Lisp: 75.622 sec.
R/UCI Lisp with NOUUO NIL:  25.840 sec.

I just tried MAS, Masinter's version of TAK using lists and CDR.
The results are

Elisp:  2.389 sec.
R/UCI Lisp:  12.816 sec.
R/UCI Lisp with NOUUO NIL: 3.886 sec.

The TAK I did was the first one you sent out.  I don't care which
one you use.  Tell me if you want me to do the other one.  I am
postponing Masinter's newest, randomized, TAK, until I figure out
how to translate it from the Interlisp.
-------

∂04-May-82  0021	RPG  	Warning! Extreme Danger Ahead!!   
To:   lisptranslators at SU-AI   
Tomorrow (Tuesday, May 4, 1982) I will be sending the MacLisp version of
Masinter's cache befuddling benchmark by netmail. There will be 101
functions in that file totalling about 731 lines of text.  If you do not
want this to appear in your mail file, let me know soon.  I want to avoid
all (some) of you trying to hack them all together in various ways,
wasting your time.

There are a couple of things about Masinter's benchmark. First, I assumed
that he meant (ITIMES (ADD1 I) 37.) etc where he had stated
(ITIMES (ADD1 I 37.)) etc. I verified that every function is called,
and even gathered statistics about which ones were called how often
(the average is about 636 times). You will also notice that TAK99
calls only TAK0 (a bad boundary condition in Larry's benchmark). I
finally verified that if you assume that TAKn is TAK that they have
the same behavior (in case anyone doubted it). So, unless I hear
from you before I feel devilish, there will be MANY BITS heading
*your* way!!!
			-rpg-

∂04-May-82  1259	RPG  	Warning!!! Many Bits Below!!!
To:   lisptranslators at SU-AI   

Here is the threatened MacLisp version of Larry Masinter's
cache destroying TAK' function. Included is the timer I use:

;;; Gross MacLisp Version
(defun timit ()
       ((lambda (t1 x gt)
		(tak0 18. 12. 6.)
		(setq t1 (- (runtime) t1))
		(setq gt (- (status gctime) gt))
		(print (list 'runtime
			     (//$ (float  (- t1 gt))
				  1000000.0)))
		(print (list 'gctime
			     (//$ (float gt) 1000000.0))))
	(runtime) ()(status gctime)))


(*rset (nouuo ()))

 (DEFUN TAK0 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK1 (TAK37 (1- X) Y Z)
		   (TAK11 (1- Y) Z X)
		   (TAK17 (1- Z) X Y)))))
  (DEFUN TAK1 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK2 (TAK74 (1- X) Y Z)
		   (TAK22 (1- Y) Z X)
		   (TAK34 (1- Z) X Y)))))
  (DEFUN TAK2 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK3 (TAK11 (1- X) Y Z)
		   (TAK33 (1- Y) Z X)
		   (TAK51 (1- Z) X Y)))))
  (DEFUN TAK3 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK4 (TAK48 (1- X) Y Z)
		   (TAK44 (1- Y) Z X)
		   (TAK68 (1- Z) X Y)))))
  (DEFUN TAK4 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK5 (TAK85 (1- X) Y Z)
		   (TAK55 (1- Y) Z X)
		   (TAK85 (1- Z) X Y)))))
  (DEFUN TAK5 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK6 (TAK22 (1- X) Y Z)
		   (TAK66 (1- Y) Z X)
		   (TAK2 (1- Z) X Y)))))
  (DEFUN TAK6 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK7 (TAK59 (1- X) Y Z)
		   (TAK77 (1- Y) Z X)
		   (TAK19 (1- Z) X Y)))))
  (DEFUN TAK7 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK8 (TAK96 (1- X) Y Z)
		   (TAK88 (1- Y) Z X)
		   (TAK36 (1- Z) X Y)))))
  (DEFUN TAK8 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK9 (TAK33 (1- X) Y Z)
		   (TAK99 (1- Y) Z X)
		   (TAK53 (1- Z) X Y)))))
  (DEFUN TAK9 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK10 (TAK70 (1- X) Y Z)
		    (TAK10 (1- Y) Z X)
		    (TAK70 (1- Z) X Y)))))
  (DEFUN TAK10 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK11 (TAK7 (1- X) Y Z)
		    (TAK21 (1- Y) Z X)
		    (TAK87 (1- Z) X Y)))))
  (DEFUN TAK11 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK12 (TAK44 (1- X) Y Z)
		    (TAK32 (1- Y) Z X)
		    (TAK4 (1- Z) X Y)))))
  (DEFUN TAK12 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK13 (TAK81 (1- X) Y Z)
		    (TAK43 (1- Y) Z X)
		    (TAK21 (1- Z) X Y)))))
  (DEFUN TAK13 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK14 (TAK18 (1- X) Y Z)
		    (TAK54 (1- Y) Z X)
		    (TAK38 (1- Z) X Y)))))
  (DEFUN TAK14 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK15 (TAK55 (1- X) Y Z)
		    (TAK65 (1- Y) Z X)
		    (TAK55 (1- Z) X Y)))))
  (DEFUN TAK15 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK16 (TAK92 (1- X) Y Z)
		    (TAK76 (1- Y) Z X)
		    (TAK72 (1- Z) X Y)))))
  (DEFUN TAK16 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK17 (TAK29 (1- X) Y Z)
		    (TAK87 (1- Y) Z X)
		    (TAK89 (1- Z) X Y)))))
  (DEFUN TAK17 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK18 (TAK66 (1- X) Y Z)
		    (TAK98 (1- Y) Z X)
		    (TAK6 (1- Z) X Y)))))
  (DEFUN TAK18 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK19 (TAK3 (1- X) Y Z)
		    (TAK9 (1- Y) Z X)
		    (TAK23 (1- Z) X Y)))))
  (DEFUN TAK19 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK20 (TAK40 (1- X) Y Z)
		    (TAK20 (1- Y) Z X)
		    (TAK40 (1- Z) X Y)))))
  (DEFUN TAK20 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK21 (TAK77 (1- X) Y Z)
		    (TAK31 (1- Y) Z X)
		    (TAK57 (1- Z) X Y)))))
  (DEFUN TAK21 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK22 (TAK14 (1- X) Y Z)
		    (TAK42 (1- Y) Z X)
		    (TAK74 (1- Z) X Y)))))
  (DEFUN TAK22 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK23 (TAK51 (1- X) Y Z)
		    (TAK53 (1- Y) Z X)
		    (TAK91 (1- Z) X Y)))))
  (DEFUN TAK23 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK24 (TAK88 (1- X) Y Z)
		    (TAK64 (1- Y) Z X)
		    (TAK8 (1- Z) X Y)))))
  (DEFUN TAK24 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK25 (TAK25 (1- X) Y Z)
		    (TAK75 (1- Y) Z X)
		    (TAK25 (1- Z) X Y)))))
  (DEFUN TAK25 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK26 (TAK62 (1- X) Y Z)
		    (TAK86 (1- Y) Z X)
		    (TAK42 (1- Z) X Y)))))
  (DEFUN TAK26 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK27 (TAK99 (1- X) Y Z)
		    (TAK97 (1- Y) Z X)
		    (TAK59 (1- Z) X Y)))))
  (DEFUN TAK27 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK28 (TAK36 (1- X) Y Z)
		    (TAK8 (1- Y) Z X)
		    (TAK76 (1- Z) X Y)))))
  (DEFUN TAK28 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK29 (TAK73 (1- X) Y Z)
		    (TAK19 (1- Y) Z X)
		    (TAK93 (1- Z) X Y)))))
  (DEFUN TAK29 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK30 (TAK10 (1- X) Y Z)
		    (TAK30 (1- Y) Z X)
		    (TAK10 (1- Z) X Y)))))
  (DEFUN TAK30 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK31 (TAK47 (1- X) Y Z)
		    (TAK41 (1- Y) Z X)
		    (TAK27 (1- Z) X Y)))))
  (DEFUN TAK31 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK32 (TAK84 (1- X) Y Z)
		    (TAK52 (1- Y) Z X)
		    (TAK44 (1- Z) X Y)))))
  (DEFUN TAK32 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK33 (TAK21 (1- X) Y Z)
		    (TAK63 (1- Y) Z X)
		    (TAK61 (1- Z) X Y)))))
  (DEFUN TAK33 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK34 (TAK58 (1- X) Y Z)
		    (TAK74 (1- Y) Z X)
		    (TAK78 (1- Z) X Y)))))
  (DEFUN TAK34 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK35 (TAK95 (1- X) Y Z)
		    (TAK85 (1- Y) Z X)
		    (TAK95 (1- Z) X Y)))))
  (DEFUN TAK35 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK36 (TAK32 (1- X) Y Z)
		    (TAK96 (1- Y) Z X)
		    (TAK12 (1- Z) X Y)))))
  (DEFUN TAK36 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK37 (TAK69 (1- X) Y Z)
		    (TAK7 (1- Y) Z X)
		    (TAK29 (1- Z) X Y)))))
  (DEFUN TAK37 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK38 (TAK6 (1- X) Y Z)
		    (TAK18 (1- Y) Z X)
		    (TAK46 (1- Z) X Y)))))
  (DEFUN TAK38 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK39 (TAK43 (1- X) Y Z)
		    (TAK29 (1- Y) Z X)
		    (TAK63 (1- Z) X Y)))))
  (DEFUN TAK39 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK40 (TAK80 (1- X) Y Z)
		    (TAK40 (1- Y) Z X)
		    (TAK80 (1- Z) X Y)))))
  (DEFUN TAK40 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK41 (TAK17 (1- X) Y Z)
		    (TAK51 (1- Y) Z X)
		    (TAK97 (1- Z) X Y)))))
  (DEFUN TAK41 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK42 (TAK54 (1- X) Y Z)
		    (TAK62 (1- Y) Z X)
		    (TAK14 (1- Z) X Y)))))
  (DEFUN TAK42 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK43 (TAK91 (1- X) Y Z)
		    (TAK73 (1- Y) Z X)
		    (TAK31 (1- Z) X Y)))))
  (DEFUN TAK43 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK44 (TAK28 (1- X) Y Z)
		    (TAK84 (1- Y) Z X)
		    (TAK48 (1- Z) X Y)))))
  (DEFUN TAK44 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK45 (TAK65 (1- X) Y Z)
		    (TAK95 (1- Y) Z X)
		    (TAK65 (1- Z) X Y)))))
  (DEFUN TAK45 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK46 (TAK2 (1- X) Y Z)
		    (TAK6 (1- Y) Z X)
		    (TAK82 (1- Z) X Y)))))
  (DEFUN TAK46 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK47 (TAK39 (1- X) Y Z)
		    (TAK17 (1- Y) Z X)
		    (TAK99 (1- Z) X Y)))))
  (DEFUN TAK47 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK48 (TAK76 (1- X) Y Z)
		    (TAK28 (1- Y) Z X)
		    (TAK16 (1- Z) X Y)))))
  (DEFUN TAK48 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK49 (TAK13 (1- X) Y Z)
		    (TAK39 (1- Y) Z X)
		    (TAK33 (1- Z) X Y)))))
  (DEFUN TAK49 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK50 (TAK50 (1- X) Y Z)
		    (TAK50 (1- Y) Z X)
		    (TAK50 (1- Z) X Y)))))
  (DEFUN TAK50 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK51 (TAK87 (1- X) Y Z)
		    (TAK61 (1- Y) Z X)
		    (TAK67 (1- Z) X Y)))))
  (DEFUN TAK51 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK52 (TAK24 (1- X) Y Z)
		    (TAK72 (1- Y) Z X)
		    (TAK84 (1- Z) X Y)))))
  (DEFUN TAK52 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK53 (TAK61 (1- X) Y Z)
		    (TAK83 (1- Y) Z X)
		    (TAK1 (1- Z) X Y)))))
  (DEFUN TAK53 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK54 (TAK98 (1- X) Y Z)
		    (TAK94 (1- Y) Z X)
		    (TAK18 (1- Z) X Y)))))
  (DEFUN TAK54 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK55 (TAK35 (1- X) Y Z)
		    (TAK5 (1- Y) Z X)
		    (TAK35 (1- Z) X Y)))))
  (DEFUN TAK55 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK56 (TAK72 (1- X) Y Z)
		    (TAK16 (1- Y) Z X)
		    (TAK52 (1- Z) X Y)))))
  (DEFUN TAK56 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK57 (TAK9 (1- X) Y Z)
		    (TAK27 (1- Y) Z X)
		    (TAK69 (1- Z) X Y)))))
  (DEFUN TAK57 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK58 (TAK46 (1- X) Y Z)
		    (TAK38 (1- Y) Z X)
		    (TAK86 (1- Z) X Y)))))
  (DEFUN TAK58 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK59 (TAK83 (1- X) Y Z)
		    (TAK49 (1- Y) Z X)
		    (TAK3 (1- Z) X Y)))))
  (DEFUN TAK59 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK60 (TAK20 (1- X) Y Z)
		    (TAK60 (1- Y) Z X)
		    (TAK20 (1- Z) X Y)))))
  (DEFUN TAK60 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK61 (TAK57 (1- X) Y Z)
		    (TAK71 (1- Y) Z X)
		    (TAK37 (1- Z) X Y)))))
  (DEFUN TAK61 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK62 (TAK94 (1- X) Y Z)
		    (TAK82 (1- Y) Z X)
		    (TAK54 (1- Z) X Y)))))
  (DEFUN TAK62 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK63 (TAK31 (1- X) Y Z)
		    (TAK93 (1- Y) Z X)
		    (TAK71 (1- Z) X Y)))))
  (DEFUN TAK63 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK64 (TAK68 (1- X) Y Z)
		    (TAK4 (1- Y) Z X)
		    (TAK88 (1- Z) X Y)))))
  (DEFUN TAK64 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK65 (TAK5 (1- X) Y Z)
		    (TAK15 (1- Y) Z X)
		    (TAK5 (1- Z) X Y)))))
  (DEFUN TAK65 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK66 (TAK42 (1- X) Y Z)
		    (TAK26 (1- Y) Z X)
		    (TAK22 (1- Z) X Y)))))
  (DEFUN TAK66 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK67 (TAK79 (1- X) Y Z)
		    (TAK37 (1- Y) Z X)
		    (TAK39 (1- Z) X Y)))))
  (DEFUN TAK67 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK68 (TAK16 (1- X) Y Z)
		    (TAK48 (1- Y) Z X)
		    (TAK56 (1- Z) X Y)))))
  (DEFUN TAK68 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK69 (TAK53 (1- X) Y Z)
		    (TAK59 (1- Y) Z X)
		    (TAK73 (1- Z) X Y)))))
  (DEFUN TAK69 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK70 (TAK90 (1- X) Y Z)
		    (TAK70 (1- Y) Z X)
		    (TAK90 (1- Z) X Y)))))
  (DEFUN TAK70 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK71 (TAK27 (1- X) Y Z)
		    (TAK81 (1- Y) Z X)
		    (TAK7 (1- Z) X Y)))))
  (DEFUN TAK71 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK72 (TAK64 (1- X) Y Z)
		    (TAK92 (1- Y) Z X)
		    (TAK24 (1- Z) X Y)))))
  (DEFUN TAK72 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK73 (TAK1 (1- X) Y Z)
		    (TAK3 (1- Y) Z X)
		    (TAK41 (1- Z) X Y)))))
  (DEFUN TAK73 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK74 (TAK38 (1- X) Y Z)
		    (TAK14 (1- Y) Z X)
		    (TAK58 (1- Z) X Y)))))
  (DEFUN TAK74 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK75 (TAK75 (1- X) Y Z)
		    (TAK25 (1- Y) Z X)
		    (TAK75 (1- Z) X Y)))))
  (DEFUN TAK75 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK76 (TAK12 (1- X) Y Z)
		    (TAK36 (1- Y) Z X)
		    (TAK92 (1- Z) X Y)))))
  (DEFUN TAK76 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK77 (TAK49 (1- X) Y Z)
		    (TAK47 (1- Y) Z X)
		    (TAK9 (1- Z) X Y)))))
  (DEFUN TAK77 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK78 (TAK86 (1- X) Y Z)
		    (TAK58 (1- Y) Z X)
		    (TAK26 (1- Z) X Y)))))
  (DEFUN TAK78 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK79 (TAK23 (1- X) Y Z)
		    (TAK69 (1- Y) Z X)
		    (TAK43 (1- Z) X Y)))))
  (DEFUN TAK79 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK80 (TAK60 (1- X) Y Z)
		    (TAK80 (1- Y) Z X)
		    (TAK60 (1- Z) X Y)))))
  (DEFUN TAK80 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK81 (TAK97 (1- X) Y Z)
		    (TAK91 (1- Y) Z X)
		    (TAK77 (1- Z) X Y)))))
  (DEFUN TAK81 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK82 (TAK34 (1- X) Y Z)
		    (TAK2 (1- Y) Z X)
		    (TAK94 (1- Z) X Y)))))
  (DEFUN TAK82 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK83 (TAK71 (1- X) Y Z)
		    (TAK13 (1- Y) Z X)
		    (TAK11 (1- Z) X Y)))))
  (DEFUN TAK83 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK84 (TAK8 (1- X) Y Z)
		    (TAK24 (1- Y) Z X)
		    (TAK28 (1- Z) X Y)))))
  (DEFUN TAK84 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK85 (TAK45 (1- X) Y Z)
		    (TAK35 (1- Y) Z X)
		    (TAK45 (1- Z) X Y)))))
  (DEFUN TAK85 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK86 (TAK82 (1- X) Y Z)
		    (TAK46 (1- Y) Z X)
		    (TAK62 (1- Z) X Y)))))
  (DEFUN TAK86 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK87 (TAK19 (1- X) Y Z)
		    (TAK57 (1- Y) Z X)
		    (TAK79 (1- Z) X Y)))))
  (DEFUN TAK87 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK88 (TAK56 (1- X) Y Z)
		    (TAK68 (1- Y) Z X)
		    (TAK96 (1- Z) X Y)))))
  (DEFUN TAK88 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK89 (TAK93 (1- X) Y Z)
		    (TAK79 (1- Y) Z X)
		    (TAK13 (1- Z) X Y)))))
  (DEFUN TAK89 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK90 (TAK30 (1- X) Y Z)
		    (TAK90 (1- Y) Z X)
		    (TAK30 (1- Z) X Y)))))
  (DEFUN TAK90 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK91 (TAK67 (1- X) Y Z)
		    (TAK1 (1- Y) Z X)
		    (TAK47 (1- Z) X Y)))))
  (DEFUN TAK91 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK92 (TAK4 (1- X) Y Z)
		    (TAK12 (1- Y) Z X)
		    (TAK64 (1- Z) X Y)))))
  (DEFUN TAK92 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK93 (TAK41 (1- X) Y Z)
		    (TAK23 (1- Y) Z X)
		    (TAK81 (1- Z) X Y)))))
  (DEFUN TAK93 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK94 (TAK78 (1- X) Y Z)
		    (TAK34 (1- Y) Z X)
		    (TAK98 (1- Z) X Y)))))
  (DEFUN TAK94 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK95 (TAK15 (1- X) Y Z)
		    (TAK45 (1- Y) Z X)
		    (TAK15 (1- Z) X Y)))))
  (DEFUN TAK95 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK96 (TAK52 (1- X) Y Z)
		    (TAK56 (1- Y) Z X)
		    (TAK32 (1- Z) X Y)))))
  (DEFUN TAK96 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK97 (TAK89 (1- X) Y Z)
		    (TAK67 (1- Y) Z X)
		    (TAK49 (1- Z) X Y)))))
  (DEFUN TAK97 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK98 (TAK26 (1- X) Y Z)
		    (TAK78 (1- Y) Z X)
		    (TAK66 (1- Z) X Y)))))
  (DEFUN TAK98 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK99 (TAK63 (1- X) Y Z)
		    (TAK89 (1- Y) Z X)
		    (TAK83 (1- Z) X Y)))))
  (DEFUN TAK99 (X Y Z) 
    (COND ((NOT (< Y X)) Z)
	  (T (TAK0 (TAK0 (1- X) Y Z)
		   (TAK0 (1- Y) Z X)
		   (TAK0 (1- Z) X Y)))))

∂04-May-82  1308	RPG  	Barrow FFT    
To:   lisptranslators at SU-AI   
Here is the Barrow FFT benchmark which tests floating operations
of various types, including flonum arrays. (ARRAYCALL FLONUM A I)
accesses the I'th element of the FLONUM array A, where these arrays are
0-based. (STORE (ARRAYCALL FLONUM A I) V) stores the value V in the
I'th element of the FLONUM array A. 

There was a fair amount of FLONUM GC's in the SAIL MacLisp run, which,
when it needed to CORE up during GC, took 4.5 seconds of CPU time for the
computation and 15 seconds for GC. Other configurations of memory required
only 1.5 seconds for GC.

Refer to this as FFT.
			-rpg-

;;; *-*lisp*-* 
;;; From Rich Duda, by way of Harry Barrow -- 3/26/82 

(DEFUN FFT					        ;Fast Fourier Transform
  (AREAL AIMAG)                                         ;AREAL = real part
  (PROG							;AIMAG = imaginary part
   (AR AI PI I J K M N LE LE1 IP NV2 NM1 UR UI WR WI TR TI)
    (SETQ AR (GET AREAL 'ARRAY))			;Initialize
    (SETQ AI (GET AIMAG 'ARRAY))
    (SETQ PI 3.141592653589793)
    (SETQ N (CADR (ARRAYDIMS AR)))
    (SETQ N (1- N))
    (SETQ NV2 (// N 2))
    (SETQ NM1 (1- N))
    (SETQ M 0)						;Compute M = log(N)
    (SETQ I 1)
   L1 (COND
       ((< I N)(SETQ M (1+ M))(SETQ I (+ I I))(GO L1)))
    (COND ((NOT (EQUAL N (↑ 2 M)))
	   (PRINC "Error ... array size not a power of two.")
	   (READ)
	   (RETURN (TERPRI))))
    (SETQ J 1)						;Interchange elements
    (SETQ I 1)						;in bit-reversed order
   L3 (COND ((< I J)
	     (SETQ TR (ARRAYCALL FLONUM AR J))
	     (SETQ TI (ARRAYCALL FLONUM AI J))
	     (STORE (ARRAYCALL FLONUM AR J) (ARRAYCALL FLONUM AR
I))
	     (STORE (ARRAYCALL FLONUM AI J) (ARRAYCALL FLONUM AI
I))
	     (STORE (ARRAYCALL FLONUM AR I) TR)	
	     (STORE (ARRAYCALL FLONUM AI I) TI)))
    (SETQ K NV2)
   L6 (COND ((< K J) (SETQ J (- J K))(SETQ K (// K 2))(GO L6)))
    (SETQ J (+ J K))
    (SETQ I (1+ I))
    (COND ((< I N)(GO L3)))
    (DO L 1 (1+ L) (> L M)				;Loop thru stages
	(SETQ LE (↑ 2 L))
	(SETQ LE1 (// LE 2))
	(SETQ UR 1.0)
	(SETQ UI 0.0)
	(SETQ WR (COS (//$ PI (FLOAT LE1))))
	(SETQ WI (SIN (//$ PI (FLOAT LE1))))
	(DO J 1 (1+ J) (> J LE1)			;Loop thru butterflies
	    (DO I J (+ I LE) (> I N)			;Do a butterfly
		(SETQ IP (+ I LE1))
		(SETQ TR (-$ (*$ (ARRAYCALL FLONUM AR IP) UR)
			     (*$ (ARRAYCALL FLONUM AI IP) UI)))
		(SETQ TI (+$ (*$ (ARRAYCALL FLONUM AR IP) UI)
			     (*$ (ARRAYCALL FLONUM AI IP) UR)))
		(STORE (ARRAYCALL FLONUM AR IP)
		       (-$ (ARRAYCALL FLONUM AR I) TR))
		(STORE (ARRAYCALL FLONUM AI IP)
		       (-$ (ARRAYCALL FLONUM AI I) TI))
		(STORE (ARRAYCALL FLONUM AR I)
		       (+$ (ARRAYCALL FLONUM AR I) TR))
		(STORE (ARRAYCALL FLONUM AI I)
		       (+$ (ARRAYCALL FLONUM AI I) TI)))
	    (SETQ TR (-$ (*$ UR WR) (*$ UI WI)))
	    (SETQ TI (+$ (*$ UR WI) (*$ UI WR)))
	    (SETQ UR TR)
	    (SETQ UI TI)))
    (RETURN T)))



;;; Sets up the two arrays
(SETQ RE (ARRAY RE FLONUM 1025.))

(SETQ IM (ARRAY IM FLONUM 1025.))


;;; The timer which does 10 calls on FFT

(defun timit ()
       ((lambda (t1 x gt)
		(do ((ntimes 0 (1+ ntimes)))
		    ((= ntimes 10.))
		    (fft 're 'im))
		(setq t1 (- (runtime) t1))
		(setq gt (- (status gctime) gt))
		(print (list 'runtime
			     (//$ (float  (- t1 gt))
				  1000000.0)))
		(print (list 'gctime
			     (//$ (float gt) 1000000.0))))
	(runtime) ()(status gctime)))


(*rset (nouuo ()))

∂04-May-82  1317	RPG  	More Info on FFT   
To:   lisptranslators at SU-AI   
I forgot to add that (CADR (ARRAYDIMS AR)) gets the 
length of the first dimension of the array.

These lines:
(SETQ RE (ARRAY RE FLONUM 1025.))

(SETQ IM (ARRAY IM FLONUM 1025.))

set up the arrays. (ARRAY RE FLONUM 1025.) sets up a 1-dimensional
FLONUM array with 1025. elements. The SETQ is required so that the array
pointer is in the value cell and not on the property list, which is what
is needed for ARRAYCALL, which is a fast array accessor.
			-rpg-

∂06-May-82  0128	Mabry Tyson <Tyson at SRI-AI> 	MAS results for UCI Lisp
Date:  6 May 1982 0126-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: MAS results for UCI Lisp
To: rpg at SU-AI

TAK program with 100 different TAKs for UCI Lisp (version from University of
Texas running on SRI-AI Tops-20 2060 with compatibility package.)

Notes on this program:  The SUB1 is generic and is not compiled in-line.

(TAK 18. 12. 6.)
Interpreted: 22.485 seconds
Compiled, no fast links: 5.124 seconds
Compiled, fast links: 1.099 seconds


(The slowdown for the first two results may be due to increased time to search
the OBLIST.  I don't know why the third time is 7% slower than the recursive
TAK.  It might be due to increased swapping from other jobs??)
-------

∂06-May-82  0129	Mabry Tyson <Tyson at SRI-AI> 	FRPOLY results for UCI Lisp  
Date:  6 May 1982 0127-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: FRPOLY results for UCI Lisp
To: rpg at SU-AI

Results on FRPOLY for UCILISP (version from University of Texas at Austin
running on SRI-AI TOPS-20).  Running with the TOPS-10/TOPS-20 compatibility
package.

Notes relevant to this problem:  UCILISP does not have bignums.  (The hooks are
there but I know of no source that uses them.)   All arithmetic operations
are generic and are NOT compiled in-line.  The times reported are CPU times
and the first time (the total) includes the second time (the GC time).
The only non-syntactic change to the source was to change the divisor for
the time computation from 1000000.0 to 1000.0 since UCILISP reports times in
milliseconds.

Interpreted (Macros expanded once only):
(bench 2)
(POWER= 2 (0.58200000 0.00000000) ? (0.67300000 0.00000000))
(bench 5)
(POWER= 5 (6.0660000 0.00000000) ? (6.1940000 0.00000000))

Compiled without fast links (and that includes calls to arithmetic functions):
(bench 2)
(POWER= 2 (0.84000000E-1 0.00000000) ? (0.90000000E-1 0.00000000))
(bench 5)
(POWER= 5 (0.93900000 0.00000000) ? (1.0160000 0.00000000))
(bench 10)
(POWER= 10 (19.273000 1.1050000) ? (19.655000 1.0580000))
(bench 15)
(POWER= 15 (31.048000 2.1530000) ? (33.469000 3.2140000))

Compiled with fast links:
(bench 2)
(POWER= 2 (0.18000000E-1 0.00000000) ? (0.23000000E-1 0.00000000))
(bench 5)
(POWER= 5 (0.20800000 0.00000000) ? (0.27900000 0.00000000))
(bench 10)
(POWER= 10 (5.5870000 1.0570000) ? (6.3900000 1.0760000))
(bench 15)
(POWER= 15 (9.1310000 2.0910000) ? (11.560000 3.1940000))
-------

∂06-May-82  0128	Mabry Tyson <Tyson at SRI-AI> 	TAK results for UCI Lisp
Date:  6 May 1982 0124-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: TAK results for UCI Lisp
To: rpg at SU-AI

Results for the modified TAK function for UCILISP (version from UTEXAS,
running on SRI-AI TOPS-20 system with compatibility package).

Notes on this program:  Arithmetic functions in UCI Lisp are generic
and are not compiled in-line.  The compiler does not recognize tail
recursion, even in the munged around version supplied.  (Why was it
presented with (NOT (< Y X)) first rather than the more natural order?
Must have been to get someone's compiler to do tail recursion when it 
otherwise wouldn't!)



(DEXPR TAK (X Y Z)
  (COND [(NOT (< Y X)) Z]
	[T  (TAK (TAK (SUB1 X) Y Z) (TAK (SUB1 Y) Z X) (TAK (SUB1 Z) X Y))]))

(TAK 18. 12. 6.)

Interpreted:  22.544 seconds
Compiled, without fast links: 4.801 seconds
Compiled, with fast links: 0.930 seconds

(No garbage collections during any of these.)
-------

∂06-May-82  2022	Kim.fateman at Berkeley 	polynomial benchmark, translated to interlisp
Date: 6 May 1982 20:16:48-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: polynomial benchmark, translated to interlisp

This is a version contributed by David Dyer, based on the
polynomial problem I submitted a while back.
I think it is comparable in most respects, though there
may be some savings available if you declare local variables.
I am unsure about this, but I suspect either the VAX ignores
this, or maybe it is done in there somehow.  Anyway, here it is:
............
(FILECREATED "20-Feb-82 19:42:04" <DDYER>IPOLY..13 6186   

     previous date: "20-Feb-82 19:36:45" <DDYER>IPOLY..11)


(PRETTYCOMPRINT IPOLYCOMS)

(RPAQQ IPOLYCOMS ((DECLARE: DONTEVAL@LOAD DOEVAL@COMPILE DONTCOPY
			    (P (SPECVARS ANS COEF F INC I K QQ SS V *X* *ALPHA *A* *B* *CHK *L *P Q* 
					 U* *VAR *Y* R R2 R3 START RES1 RES2 RES3)))
	(FNS PCOEFADD PCPLUS PCPLUS1 PPLUS PTIMES PTIMES1 PTIMES2 PTIMES3 PSIMP PCTIMES PCTIMES1 
	     PEXPTSQ PPLUS1 BENCH ODDP SETUP)
	(MACROS * IPOLYMACROS)))
(DECLARE: DONTEVAL@LOAD DOEVAL@COMPILE DONTCOPY 
(SPECVARS ANS COEF F INC I K QQ SS V *X* *ALPHA *A* *B* *CHK *L *P Q* U* *VAR *Y* R R2 R3 START RES1 
	  RES2 RES3)
)
(DEFINEQ

(PCOEFADD
  [LAMBDA (E C X)
    (COND
      ((PZEROP C)
	X)
      (T (CONS E (CONS C X])

(PCPLUS
  [LAMBDA (C P)
    (COND
      ((PCOEFP P)
	(CPLUS P C))
      (T (PSIMP (CAR P)
		(PCPLUS1 C (CDR P])

(PCPLUS1
  [LAMBDA (C X)
    (COND
      [(NULL X)
	(COND
	  ((PZEROP C)
	    NIL)
	  (T (CONS 0 (CONS C NIL]
      ((PZEROP (CAR X))
	(PCOEFADD 0 (PPLUS C (CADR X))
		  NIL))
      (T (CONS (CAR X)
	       (CONS (CADR X)
		     (PCPLUS1 C (CDDR X])

(PPLUS
  [LAMBDA (X Y)
    (COND
      ((PCOEFP X)
	(PCPLUS X Y))
      ((PCOEFP Y)
	(PCPLUS Y X))
      [(EQ (CAR X)
	   (CAR Y))
	(PSIMP (CAR X)
	       (PPLUS1 (CDR Y)
		       (CDR X]
      [(POINTERGP (CAR X)
		  (CAR Y))
	(PSIMP (CAR X)
	       (PCPLUS1 Y (CDR X]
      (T (PSIMP (CAR Y)
		(PCPLUS1 X (CDR Y])

(PTIMES
  [LAMBDA (X Y)
    (COND
      ((OR (PZEROP X)
	   (PZEROP Y))
	(PZERO))
      ((PCOEFP X)
	(PCTIMES X Y))
      ((PCOEFP Y)
	(PCTIMES Y X))
      [(EQ (CAR X)
	   (CAR Y))
	(PSIMP (CAR X)
	       (PTIMES1 (CDR X)
			(CDR Y]
      [(POINTERGP (CAR X)
		  (CAR Y))
	(PSIMP (CAR X)
	       (PCTIMES1 Y (CDR X]
      (T (PSIMP (CAR Y)
		(PCTIMES1 X (CDR Y])

(PTIMES1
  [LAMBDA (*X* Y)
    (PROG (U* V)
          (SETQ V (SETQ U*(PTIMES2 Y)))
      A   (SETQ *X*(CDDR *X*))
          (COND
	    ((NULL *X*)
	      (RETURN U*)))
          (PTIMES3 Y)
          (GO A])

(PTIMES2
  [LAMBDA (Y)
    (COND
      ((NULL Y)
	NIL)
      (T (PCOEFADD (PLUS (CAR *X*)
			 (CAR Y))
		   (PTIMES (CADR *X*)
			   (CADR Y))
		   (PTIMES2 (CDDR Y])

(PTIMES3
  [LAMBDA (Y)
    (PROG (E U C)
      A1  (COND
	    ((NULL Y)
	      (RETURN NIL)))
          (SETQ E (IPLUS (CAR *X*)
			 (CAR Y)))
          (SETQ C (PTIMES (CADR Y)
			  (CADR *X*)))
          (COND
	    ((PZEROP C)
	      (SETQ Y (CDDR Y))
	      (GO A1))
	    ((OR (NULL V)
		 (IGREATERP E (CAR V)))
	      [SETQ U*(SETQ V (PPLUS1 U*(LIST E C]
	      (SETQ Y (CDDR Y))
	      (GO A1))
	    ((IEQP E (CAR V))
	      (SETQ C (PPLUS C (CADR V)))
	      (COND
		[(PZEROP C)
		  (SETQ U*(SETQ V (PDIFFER1 U*(LIST (CAR V)
						    (CADR V]
		(T (RPLACA (CDR V)
			   C)))
	      (SETQ Y (CDDR Y))
	      (GO A1)))
      A   (COND
	    ((AND (CDDR V)
		  (IGREATERP (CADDR V)
			     E))
	      (SETQ V (CDDR V))
	      (GO A)))
          (SETQ U (CDR V))
      B   (COND
	    ((OR (NULL (CDR U))
		 (ILESSP (CADR U)
			 E))
	      [RPLACD U (CONS E (CONS C (CDR U]
	      (GO E)))
          (COND
	    ((PZEROP (SETQ C (PPLUS (CADDR U)
				    C)))
	      (RPLACD U (CDDDR U))
	      (GO D))
	    (T (RPLACA (CDDR U)
		       C)))
      E   (SETQ U (CDDR U))
      D   (SETQ Y (CDDR Y))
          (COND
	    ((NULL Y)
	      (RETURN NIL)))
          (SETQ E (IPLUS (CAR *X*)
			 (CAR Y)))
          (SETQ C (PTIMES (CADR Y)
			  (CADR *X*)))
      C   (COND
	    ((AND (CDR U)
		  (IGREATERP (CADR U)
			     E))
	      (SETQ U (CDDR U))
	      (GO C)))
          (GO B])

(PSIMP
  [LAMBDA (VAR X)
    (COND
      ((NULL X)
	0)
      ((ATOM X)
	X)
      ((ZEROP (CAR X))
	(CADR X))
      (T (CONS VAR X])

(PCTIMES
  [LAMBDA (C P)
    (COND
      ((PCOEFP P)
	(CTIMES C P))
      (T (PSIMP (CAR P)
		(PCTIMES1 C (CDR P])

(PCTIMES1
  [LAMBDA (C X)
    (COND
      ((NULL X)
	NIL)
      (T (PCOEFADD (CAR X)
		   (PTIMES C (CADR X))
		   (PCTIMES1 C (CDDR X])

(PEXPTSQ
  [LAMBDA (P N)
    (PROG (S)
          (SETQ S (COND
	      ((ODDP N)
		P)
	      (T 1)))
          (SETQ N (QUOTIENT N 2))
      LOOP(COND
	    ((ZEROP N)
	      (RETURN S)))
          (SETQ P (PTIMES P P))
          (AND (ODDP N)
	       (SETQ S (PTIMES S P)))
          (SETQ N (QUOTIENT N 2))
          (GO LOOP])

(PPLUS1
  [LAMBDA (X Y)
    (COND
      ((NULL X)
	Y)
      ((NULL Y)
	X)
      [(IEQP (CAR X)
	     (CAR Y))
	(PCOEFADD (CAR X)
		  (PPLUS (CADR X)
			 (CADR Y))
		  (PPLUS1 (CDDR X)
			  (CDDR Y]
      [(IGREATERP (CAR X)
		  (CAR Y))
	(CONS (CAR X)
	      (CONS (CADR X)
		    (PPLUS1 (CDDR X)
			    Y]
      (T (CONS (CAR Y)
	       (CONS (CADR Y)
		     (PPLUS1 X (CDDR Y])

(BENCH
  [LAMBDA (N)
    (TIME (PEXPTSQ R N)
	  1 3])

(ODDP
  [LAMBDA (X)
    (EQP (REMAINDER X 2)
	 1])

(SETUP
  [LAMBDA NIL
    (PUTPROP (QUOTE X)
	     (QUOTE ORDER)
	     1)
    (PUTPROP (QUOTE Y)
	     (QUOTE ORDER)
	     2)
    (PUTPROP (QUOTE Z)
	     (QUOTE ORDER)
	     3)
    [SETQ R (PPLUS (QUOTE (X 1 1 0 1))
		   (PPLUS (QUOTE (Y 1 1))
			  (QUOTE (Z 1 1]
    (SETQ R2 (PTIMES R 100000))
    (SETQ R3 (PTIMES R 1.0])
)

(RPAQQ IPOLYMACROS (CPLUS CTIMES PCOEFP POINTERGP PZERO PZEROP))
(DECLARE: EVAL@COMPILE 

(PUTPROPS CPLUS MACRO [LAMBDA (X Y)
			(PLUS X Y])

(PUTPROPS CTIMES MACRO [LAMBDA (X Y)
			 (TIMES X Y])

(PUTPROPS PCOEFP MACRO [LAMBDA (E)
			 (ATOM E])

(PUTPROPS POINTERGP MACRO [LAMBDA (X Y)
			    (IGREATERP (GETPROP X (QUOTE ORDER))
				       (GETPROP Y (QUOTE ORDER])

(PUTPROPS PZERO MACRO [LAMBDA NIL 0])

(PUTPROPS PZEROP MACRO [LAMBDA (X)
			 (EQP X 0])
)
(DECLARE: DONTCOPY
  (FILEMAP (NIL (694 5651 (PCOEFADD 706 . 803) (PCPLUS 807 . 936) (PCPLUS1 940 . 1218) (PPLUS 1222 . 
1590) (PTIMES 1594 . 2013) (PTIMES1 2017 . 2246) (PTIMES2 2250 . 2441) (PTIMES3 2445 . 3914) (PSIMP 
3918 . 4062) (PCTIMES 4066 . 4198) (PCTIMES1 4202 . 4361) (PEXPTSQ 4365 . 4727) (PPLUS1 4731 . 5156) (
BENCH 5160 . 5224) (ODDP 5228 . 5285) (SETUP 5289 . 5648)))))
STOP

∂03-May-82  2027	HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility) 	Puzzle benchmark  
Date:  3 May 1982 2322-EDT
From: HEDRICK at RUTGERS (Mgr DEC-20s/Dir LCSR Comp Facility)
Subject: Puzzle benchmark
To: rpg at SU-AI

Hmmm... If you are puzzled by my most recent message, which corrects
a message you never got, the reason is that I typed RGP@SAIL.
The corrected data is in the latest message, so you don't need it
anyway.  I did comment about the way I converted the Puzzle benchmark:

I did a straightforward conversion, defining DO and LET as macros,
so the program structure was completely unchanged.

Of course the problem with Elisp and R/UCI Lisp is that Maclisp is
going to compile a lot of this benchmark open, whereas array
accesses and arithmetic are done by runtimes in Elisp and R/UCI Lisp.
I am not sure this is the worst possible benchmark for Elisp and R/UCI
Lisp, but it is probably close.

Note that there are a couple of special cases in which vector accesses
compile open.  They are designed to make structure accesses (I have
a subset of DEFSTRUC in Elisp) compile open.
-------

∂04-May-82  0208	JONL at PARC-MAXC 	Barrow's MacLISP version of FFT
Date:  4 MAY 1982 0208-PDT
From: JONL at PARC-MAXC
Subject: Barrow's MacLISP version of FFT
to:   rpg at SU-AI

;;; *-*lisp*-* 
;;; From Rich Duda, by way of Harry Barrow -- 3/26/82 

;;; FFT: in-place radix-2 Fast Fourier Transform over MacLisp FLONUM
;;; arrays.  AREAL and AIMAG are SYMBOLS naming the arrays; the array
;;; objects are fetched from each symbol's ARRAY property.  Elements
;;; 1..N are transformed (slot 0 is unused), where N = array size - 1
;;; and must be a power of two.  Returns T on success; on a bad size it
;;; prints a message, waits on (READ), and returns the value of TERPRI.
(DEFUN FFT					        ;Fast Fourier Transform
  (AREAL AIMAG)                                         ;AREAL = real part
  (PROG							;AIMAG = imaginary part
   (AR AI PI I J K M N LE LE1 IP NV2 NM1 UR UI WR WI TR TI)
    (SETQ AR (GET AREAL 'ARRAY))			;Initialize
    (SETQ AI (GET AIMAG 'ARRAY))
    (SETQ PI 3.141592653589793)
    (SETQ N (CADR (ARRAYDIMS AR)))
    ;; The arrays are allocated one slot larger than the transform
    ;; size so they can be used 1-indexed; hence the decrement.
    (SETQ N (1- N))
    (SETQ NV2 (// N 2))
    (SETQ NM1 (1- N))
    (SETQ M 0)						;Compute M = log(N)
    (SETQ I 1)
   L1 (COND
       ((< I N)(SETQ M (1+ M))(SETQ I (+ I I))(GO L1)))
    ;; Insist that N be an exact power of two before proceeding.
    (COND ((NOT (EQUAL N (↑ 2 M)))
	   (PRINC "Error ... array size not a power of two.")
	   (READ)					;wait for acknowledgement
	   (RETURN (TERPRI))))
    (SETQ J 1)						;Interchange elements
    (SETQ I 1)						;in bit-reversed order
   L3 (COND ((< I J)
	     (SETQ TR (ARRAYCALL FLONUM AR J))
	     (SETQ TI (ARRAYCALL FLONUM AI J))
	     (STORE (ARRAYCALL FLONUM AR J) (ARRAYCALL FLONUM AR
I))
	     (STORE (ARRAYCALL FLONUM AI J) (ARRAYCALL FLONUM AI
I))
	     (STORE (ARRAYCALL FLONUM AR I) TR)	
	     (STORE (ARRAYCALL FLONUM AI I) TI)))
    ;; Advance J to the bit-reversed successor of its current value.
    (SETQ K NV2)
   L6 (COND ((< K J) (SETQ J (- J K))(SETQ K (// K 2))(GO L6)))
    (SETQ J (+ J K))
    (SETQ I (1+ I))
    (COND ((< I N)(GO L3)))
    ;; Old-style MacLisp DO: (DO var init step end-test . body).
    (DO L 1 (1+ L) (> L M)				;Loop thru stages
	(SETQ LE (↑ 2 L))
	(SETQ LE1 (// LE 2))
	(SETQ UR 1.0)					;(UR,UI) steps around the
	(SETQ UI 0.0)					;unit circle by PI/LE1
	(SETQ WR (COS (//$ PI (FLOAT LE1))))
	(SETQ WI (SIN (//$ PI (FLOAT LE1))))
	(DO J 1 (1+ J) (> J LE1)			;Loop thru butterflies
	    (DO I J (+ I LE) (> I N)			;Do a butterfly
		(SETQ IP (+ I LE1))
		(SETQ TR (-$ (*$ (ARRAYCALL FLONUM AR IP) UR)
			     (*$ (ARRAYCALL FLONUM AI IP) UI)))
		(SETQ TI (+$ (*$ (ARRAYCALL FLONUM AR IP) UI)
			     (*$ (ARRAYCALL FLONUM AI IP) UR)))
		(STORE (ARRAYCALL FLONUM AR IP)
		       (-$ (ARRAYCALL FLONUM AR I) TR))
		(STORE (ARRAYCALL FLONUM AI IP)
		       (-$ (ARRAYCALL FLONUM AI I) TI))
		(STORE (ARRAYCALL FLONUM AR I)
		       (+$ (ARRAYCALL FLONUM AR I) TR))
		(STORE (ARRAYCALL FLONUM AI I)
		       (+$ (ARRAYCALL FLONUM AI I) TI)))
	    ;; Rotate (UR,UI) by (WR,WI) for the next butterfly group.
	    (SETQ TR (-$ (*$ UR WR) (*$ UI WI)))
	    (SETQ TI (+$ (*$ UR WI) (*$ UI WR)))
	    (SETQ UR TR)
	    (SETQ UI TI)))
    (RETURN T)))



;; Two 1025.-element FLONUM arrays: slot 0 is unused so that FFT can
;; index elements 1..1024.
(SETQ RE (ARRAY RE FLONUM 1025.))

(SETQ IM (ARRAY IM FLONUM 1025.))



;; Benchmark driver: run (FFT 'RE 'IM) n times, then print the total
;; and the per-iteration average.  Note the quoted symbols: FFT looks
;; the arrays up via the ARRAY property of the symbols RE and IM.
;; NOTE(review): depends on a RUNTIME function not defined in this
;; listing; the printed figures are in whatever units it returns --
;; confirm before comparing across systems.
(defun try (n)
  (prog (t0)
    (setq t0 (runtime))
    (do ((ntimes 0 (1+ ntimes)))
	((= ntimes n))
      (fft 're 'im))
    (setq t0 (- (runtime) t0))
    (terpri)(terpri)
    (princ "Total time = ")(princ t0)
    (princ "   for ")(princ n)(princ " iterations.")
    (terpri)
    (princ "Average time = ")(princ (quotient t0 n))(princ " per iteration.")
    (terpri)(terpri)))

(PRINT 'DONE)

∂07-May-82  1956	MASINTER at PARC-MAXC 	Interlisp-10 FFT timings   
Date:  7 MAY 1982 1956-PDT
From: MASINTER at PARC-MAXC
Subject: Interlisp-10 FFT timings
To:   RPG at SU-AI
cc:   masinter

Here are the Interlisp-10 times I got using a slight modification
of Barrow's original translation:

 Speed:     1.715 CPU seconds        Space:         1 large integers
           13.033 real seconds                  13318 floating numbers
            9.975 gc time                          39 page faults
load av=     .679 

With MINFS(20000 FLOATP)

 Speed:     1.691 CPU seconds        Space:         1 large integers
            4.190 real seconds                  13318 floating numbers
            1.917 gc time                          28 page faults
load av=     .724 

Here is the code. First, a "fast floating" package:

(FILECREATED " 7-May-82 19:50:45" <MASINTER>FELT..3 660    

     previous date: "30-Mar-82 00:30:40" <MASINTER>FELT..2)


(PRETTYCOMPRINT FELTCOMS)

(RPAQQ FELTCOMS [(MACROS * FELTMACROS)
		 (P (MOVD (QUOTE ELT)
			  (QUOTE FLELT))
		    (MOVD (QUOTE SETA)
			  (QUOTE FLSETA])

(RPAQQ FELTMACROS (FLELT FLSETA))
(DECLARE: EVAL@COMPILE 

(PUTPROPS FLELT MACRO [(A N)
	   (.FLOC. (VAG (OPENR (VAG (IPLUS (LOC A)
					   (ADD1 N])

(PUTPROPS FLSETA MACRO ((A N V)
			(CLOSER (IPLUS (LOC A)
				       (ADD1 N))
				(FLOAT V))))
)
(MOVD (QUOTE ELT)
      (QUOTE FLELT))
(MOVD (QUOTE SETA)
      (QUOTE FLSETA))
(DECLARE: DONTCOPY
  (FILEMAP (NIL)))
STOP


And then FFTI

(FILECREATED " 7-May-82 19:50:31" <MASINTER>FFTI.LSP.5 3390   

     previous date: " 7-May-82 19:45:09" <MASINTER>FFTI.LSP.4)


(PRETTYCOMPRINT FFTICOMS)

(RPAQQ FFTICOMS ((FNS * FFTIFNS)
		 (LOCALVARS . T)))

(RPAQQ FFTIFNS (FFT TRY))
(DEFINEQ

(FFT
  [LAMBDA (AREAL AIMAG)                         (* edited: 
						"30-Mar-82 00:25")
                                                (* Fast Fourier 
						Transform AREAL = real 
						part, AIMAG = imaginary 
						part)
                                                (* Arrays are accessed 
						with FLELT and FLSETA 
						-- the open-coded 
						macros from the FELT 
						package)
    (PROG (AR AI PI I J K M N LE LE1 IP NV2 NM1 UR UI WR WI TR TI)
          (SETQ AR AREAL)                       (* Initialize)
          (SETQ AI AIMAG)
          (SETQ PI 3.141593)
          (SETQ N (ARRAYSIZE AR))               (* N is the full array 
						size and must be a 
						power of 2 -- no spare 
						slot, unlike the 
						MacLisp version)
          (SETQ NV2 (IQUOTIENT N 2))
          (SETQ NM1 (SUB1 N))
          (SETQ M 0)                            (* Compute M = log 
						(N))
          (SETQ I 1)
      L1  (COND
	    ((ILESSP I N)
	      (SETQ M (ADD1 M))
	      (SETQ I (IPLUS I I))
	      (GO L1)))
          [COND
	    ((NOT (IEQP N (EXPT 2 M)))
	      (PRIN1 "Error ... array size not a power of two.")
	      (HELP)
	      (RETURN (TERPRI]
          (SETQ J 1)                            (* Interchange elements)
          (SETQ I 1)                            (* in bit-reversed 
						order)
      L3  (COND
	    ((ILESSP I J)
	      (SETQ TR (FLELT AR J))
	      (SETQ TI (FLELT AI J))
	      (FLSETA AR J (FLELT AR I))
	      (FLSETA AI J (FLELT AI I))
	      (FLSETA AR I TR)
	      (FLSETA AI I TI)))
          (SETQ K NV2)                          (* Advance J to its 
						bit-reversed successor)
      L6  (COND
	    ((ILESSP K J)
	      (SETQ J (IDIFFERENCE J K))
	      (SETQ K (IQUOTIENT K 2))
	      (GO L6)))
          (SETQ J (IPLUS J K))
          (SETQ I (ADD1 I))
          (COND
	    ((ILESSP I N)
	      (GO L3)))
          (for L from 1 to M
	     do                                 (* Loop thru stages)
		(SETQ LE (EXPT 2 L))
		(SETQ LE1 (IQUOTIENT LE 2))
		(SETQ UR 1.0)
		(SETQ UI 0.0)
		[SETQ WR (COS (FQUOTIENT PI (FLOAT LE1]
		[SETQ WI (SIN (FQUOTIENT PI (FLOAT LE1]
		(for J from 1 to LE1
		   do                           (* Loop thru 
						butterflies)
		      (for I←J by (IPLUS I LE) while (ILEQ I N)
			 do                     (* Do a butterfly)
			    (SETQ IP (IPLUS I LE1))
			    (SETQ TR (FDIFFERENCE (FTIMES (FLELT AR IP)
							  UR)
						  (FTIMES (FLELT AI IP)
							  UI)))
			    (SETQ TI (FPLUS (FTIMES (FLELT AR IP)
						    UI)
					    (FTIMES (FLELT AI IP)
						    UR)))
			    (FLSETA AR IP (FDIFFERENCE (FLELT AR I)
						       TR))
			    (FLSETA AI IP (FDIFFERENCE (FLELT AI I)
						       TI))
			    (FLSETA AR I (FPLUS (FLELT AR I)
						TR))
			    (FLSETA AI I (FPLUS (FLELT AI I)
						TI)))
                                                (* Rotate UR and UI by 
						WR and WI for the next 
						butterfly group)
		      (SETQ TR (FDIFFERENCE (FTIMES UR WR)
					    (FTIMES UI WI)))
		      (SETQ TI (FPLUS (FTIMES UR WI)
				      (FTIMES UI WR)))
		      (SETQ UR TR)
		      (SETQ UI TI)))
          (RETURN T])

(TRY
  [LAMBDA (SIZE)                                (* edited: 
						"30-Mar-82 00:26")
                                                (* Benchmark driver -- 
						allocate SIZE-element 
						FLOATP arrays, zero 
						them, and TIME one call 
						to FFT. SIZE defaults 
						to 1024)
    (COND
      ((NULL SIZE)
	(SETQ SIZE 1024)))
    (SETQ RE (ARRAY SIZE (QUOTE FLOATP)))       (* RE and IM are set as 
						top level variables)
    (SETQ IM (ARRAY SIZE (QUOTE FLOATP)))
    (for I from 1 to SIZE do (FLSETA RE I (FLOAT 0))
			     (FLSETA IM I (FLOAT 0)))
                                                (* The second argument 
						to TIME is presumably 
						the repetition count -- 
						confirm)
    (TIME (FFT RE IM)
	  1])
)
(DECLARE: DOEVAL@COMPILE DONTCOPY

(LOCALVARS . T)
)
(DECLARE: DONTCOPY
  (FILEMAP (NIL (248 3309 (FFT 260 . 2954) (TRY 2958 . 3306)))))
STOP

∂07-May-82  2142	MASINTER at PARC-MAXC 	archives for LispTranslators@SU-AI   
Date:  7 MAY 1982 2141-PDT
From: MASINTER at PARC-MAXC
Subject: archives for LispTranslators@SU-AI
To:   RPG at SU-AI
cc:   Tyson at SRI-AI, Masinter

Are messages to the timing lists kept online at SU-AI?

Larry

Yes. Timing.msg[tim,lsp] which needs no password to FTP away from.
			-rpg-
∂07-May-82  2159	MASINTER at PARC-MAXC 	Interlisp-10 TAK timings   
Date:  7 MAY 1982 2159-PDT
From: MASINTER at PARC-MAXC
Subject: Interlisp-10 TAK timings
To:   RPG at SU-AI
cc:   masinter

Block compiled: 2.04 seconds
Regular compiled, not swapped: 4.57 seconds
regular compiled, MKSWAPSIZE set low: 12.7 seconds

On ISIB (2060).

There were some better Interlisp-10 times obtained using the
NOBOX package, but I don't have them.

Larry

∂08-May-82  1032	MASINTER at PARC-MAXC 	A note of warning in doing Interlisp-10 timings...  
Date:  8 MAY 1982 1031-PDT
From: MASINTER at PARC-MAXC
Subject: A note of warning in doing Interlisp-10 timings...
To:   LispTranslators at SU-AI
cc:   masinter

Interlisp-10 has a 'feature' where compiled functions can be
made 'swapped': the code is actually kept in another fork, and
at function entry time, it is PMAP'ed up into the running fork
if not already there.

Unfortunately, the swapper is not very efficient: calling a swapped
function is significantly (factor of 2) worse than calling a 
non-swapped function even in the best case.

A heuristic was chosen to decide which functions should be swapped:
functions smaller than MKSWAPSIZE (in words of compiled code) are
not swapped; those bigger are.

This works ok, since generally you spend more time in bigger functions
and so the call overhead is dominated by the time in the function.
You can also specifically ask that functions not be swapped
(by adding them to NOSWAPFNS).

A few sites (e.g., ISI) have chosen to reload the entire Interlisp-10
system with MKSWAPSIZE set very small. This gives them more space
(19 pages), at a performance cost in calling some trivial functions.

For example, if ELT and SETA are swapped, the overhead in calling
them in inner loops can easily be increased by a factor of two.

I imagine this can also have an impact on the timings of larger
programs as well.

Larry

∂08-May-82  2132	Kim.jkf at Berkeley 	mas benchmark 
Date: 8 May 1982 21:25:39-PDT
From: Kim.jkf at Berkeley
To: rpg@su-ai
Subject: mas benchmark
Cc: Kim.fateman@Berkeley

mas benchmark (tak benchmark using tak0 through tak99)
Lisp Opus 38.16,
Liszt 8.06

				780			750

slow links			13.26			19.17

normal links			 3.62			 5.09


slow links = translink nil, fast links = translink on


∂08-May-82  2148	Kim.jkf at Berkeley 	updated mas benchmark results
Date: 8 May 1982 21:41:30-PDT
From: Kim.jkf at Berkeley
To: rpg@su-ai
Subject: updated mas benchmark results
Cc: Kim.fateman@Berkeley

  I ran it with local functions and here are the complete results:

mas benchmark (tak benchmark using tak0 through tak99)
Lisp Opus 38.16,
Liszt 8.06

				780			750

slow links			13.26			19.17

normal links			 3.62			 5.09

local functions			 1.7			 2.7

slow links = translink nil, fast links = translink on


∂10-May-82  1000	RPG  	MAS Benchmark 
To:   lisptranslators at SU-AI   
Larry Masinter sent out a benchmark called ``MAS'' which uses
lists instead of numbers. As you recall the benchmark was to time
the call (MAS (LISTN 18.) (LISTN 12.) (LISTN 6.)). In order to more
easily compare MAS and TAK', could you time (MAS 18L 12L 6L) where
you have done (SETQ 18L (LISTN 18.) 12L (LISTN 12.) 6L (LISTN 6.)).

Thank you.
			-rpg-

∂10-May-82  1917	Mabry Tyson <Tyson at SRI-AI> 	MAS timings for UCI-Lisp
Date: 10 May 1982 1913-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: MAS timings for UCI-Lisp
To: rpg at SU-AI

Timings for the MAS benchmark for UCI-Lisp (version from UTexas-20, run
on SRI-AI):

(MAS L18 L12 L6)

Compiled, slow links ((NOUUO T))	15.849 seconds
Compiled, fast links ((NOUUO NIL))	 3.633 seconds
-------

∂10-May-82  2101	Mabry Tyson <Tyson at SRI-AI> 	UCI Lisp on Basket Puzzle    
Date: 10 May 1982 2055-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: UCI Lisp on Basket Puzzle
To: rpg at SU-AI

Results on the Basket Puzzle Benchmark for UCI-Lisp (version from UTexas-20,
tests run on SRI-AI 2060).

Compiled, slow links:	85.804 seconds (no GC)
Compiled, fast links:	23.966 seconds (no GC)
-------

∂11-May-82  1457	Mabry Tyson <Tyson at SRI-AI> 	Lisp timings  
Date: 11 May 1982 1457-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Lisp timings
To: rpg at SU-AI

I'd like to express my opinion that the timing tests so far have been
very strongly weighted towards arithmetic and manipulation of numbers.
Even TAK (but not MAS) suffers from this.

Very few programs that I have been associated with abuse the numeric
features of Lisp as much as these test programs.  If you were to give
a paper including only these results, I would protest that this was not
typical of Lisp programs and, for most of these programs, Fortran would
have been a better language to write them in (especially if you are just
looking at the execution time of the final program).  I admit I am tempted
to rewrite the FFT program as a Fortran program and then simply call it
from UCILISP.

I believe you said you were going to present the results at the Lisp
conference.  In that case, I think you ought to do one of two things.
1) Separate the results into categories.  Eg, "In manipulating array
references, FOOBAR lisp was the best..." or "FOOBAR lisp was best at
handling numerics". Or 2) Get some more typical programs.

In getting typical programs, I hope that you will be very careful to
try to stick to a (more or less) common subset of Lisp.  It has been
bothersome trying to translate the various lisps of the tests into
UCI-Lisp.  Perhaps example programs from Lisp texts would be good.
The first example that comes to mind is Wang's algorithm from the
"Lisp 1.5 Programmer's Manual".  It is still too simple a program (very
little besides CONDs, EQs, MEMBERs, and function calls) but I believe
it to be more representative than a FFT program.

Another example would be the differentiation program in Chapter 20 of
Weissman's "Lisp 1.5 Primer".  Unfortunately neither of these two
programs excercise the property list.  And I don't believe either of
them exercises CONS very much.  Certainly one of the test programs
ought to indicate the relative speed of building s-expressions (and
gc'ing them possibly).  Another test ought to see how fast lisps are
at running through S-expressions (CDR, CAR, etc).
-------

The problem I have been having with this whole project is to get
benchmarks that are representative from more people than just me.
I have a simple differentiation program that Vaughn Pratt submitted,
but I have not sent out. I will send it out and also will look at the
Wang theorem prover to see how reasonable it is. 

I talked to Forestt Baskett who pointed out a place to find some
other benchmarks to look at that the Army proposed. I doubt that
they will have much that is interesting.

Perhaps you could suggest a property list benchmark?

I intend to separate the benchmarks into what they test and
to then talk about which Lisps did best and why. Larry
Masinter and I are finishing up a paper on evaluation and
timing that discusses all the issues you bring up and more.
When it is in better shape I can send you a copy that has been
Dovered out. Otherwise, you could ftp timing.tex[1,rpg] from SAIL
to see what we are up to.

I appreciate your comments and help immeasurably.
			-rpg-
∂11-May-82  1546	Mabry Tyson <ATP.Tyson at UTEXAS-20> 	UCI Lisp    
Date: 11 May 1982 1741-CDT
From: Mabry Tyson <ATP.Tyson at UTEXAS-20>
Subject: UCI Lisp
To: rpg at SU-AI

Paul said you were wondering what the differences were between UT's version
of UCI-Lisp and "vanilla" UCI-Lisp.  The answer depends upon what is
"vanilla".  The 1973 version?  RUCI-Lisp? or Meehan's version?

The main difference for these timing tests is that we have the compiler that
is in RUCI-Lisp and Meehan's version (with a few bug fixes).  The other
main difference is that UT's version has had its EVAL source code massaged
so it is something like 10-20% faster.  This only makes a difference when
doing interpretation (and you can only achieve that if you are EVAL-bound).

I am including the documentation detailing the differences.  UT's version
has been distributed to a number of sites in the Texas-Louisiana area.
It is also being used by former graduates running at various spots around the
country.  It has even been shipped to Germany.  I'm not sure just how far
secondary sources have spread it.

The principal maintainer of UT's version (me) is now gone from UTexas so
I don't know what the future is for it.  But I don't think any of the
people I know that worked on UCI-Lisp (Meehan, Lefaivre, Don Cohen, and
myself) are still working on it.  It will be up to the next generation
of hackers to see if anything comes of it.

On second thought, you probably don't want to have your mail file
cluttered with this documentation.  So FTP [UTEXAS-20]<UCILISP>UTALSP.DOC
for the detailed differences and a little history at the front.
-------

∂12-May-82  0003	Mabry Tyson <Tyson at SRI-AI> 	UCI-Lisp timing on Barrow FFT
Date: 11 May 1982 2354-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: UCI-Lisp timing on Barrow FFT
To: rpg at SU-AI

Here are the results for the Barrow FFT benchmark for UCI-Lisp (version
from UTexas, run on SRI-AI 2060).

Notes for this benchmark: UCI-Lisp does not have an ARRAYCALL function.  I
replaced the ARRAYCALL's by calls directly to the arrays.  Also, UCI-Lisp does
not have the size of an array on the property list of the array name so I
added a parameter to FFT that is the size of the array.  The arithmetic
functions COS, etc are not normally included in UCI-Lisp and are loaded from a
Fortran library by means of the loader.  This existed in the original UCI-Lisp
(1973) and RUCI-Lisp but I don't know offhand if it is distributed with
Meehan's version.

			Time w/o GC	GC	Total time
Interpreted		  16.130	1.829	17.959
Compiled, slow links	   6.739	1.815	 8.554
Compiled, fast links	   3.334	1.779	 5.113
-------

∂12-May-82  0003	Mabry Tyson <Tyson at SRI-AI> 	Note on UCI-Lisp timings on 2060  
Date: 12 May 1982 0002-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Note on UCI-Lisp timings on 2060
To: rpg at SU-AI

I believe I said this a long time ago (on the first go around).  The timings
I get on the 2060 are subject to 10% swings (at least) depending on various
factors which are not clear to me.  I get better timings when the load is
low.  Earlier today I was getting 10% swings with the load factor running
from 0.5 - 1.5 but there were many jobs logged in.

In order to be consistent, I have been trying to do the timings late at night
where there should be a minimum of swapping or other interrupts.  Even so,
the timings vary.  I presume that I am getting charged for processing done
by the system on other jobs (interrupts, etc) and so I have been reporting
the fastest times I get.  I feel this represents the truest measure of the
speed of the program on this machine, ignoring load.  Generally I run a couple
or three tests and pick the best time.
-------

∂06-May-82  1750	Kim.fateman at Berkeley 	here's the code
Date: 6 May 1982 17:45:09-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: here's the code

;;; Franz Lisp driver for the C-coded FFT: the transform itself runs
;;; entirely in C (source follows later in this message).
(cfasl 'fftc.o '←fft 'fftc 'subroutine) ;;; this loads the C program
					;;; and binds the entry point

;; fast fourier transform
;; The arrays themselves (flonum-block, i.e. unboxed) are passed, plus
;; the array size, since the C side cannot call arraydims.
(defun fft (re im)
(fftc re im (cadr(arraydims re)))) ;; realpart, imagpart, arraysize
				   ;; note that the interface on this side is
				   ;; about as simple as you could ask for

;; 1025. slots so the 1-indexed C code can use elements 1..1024.
(setq re (array re flonum-block 1025.))
(setq im (array im flonum-block 1025.))

;; the rest of the file is for timing.

;; CPU seconds: assumes (car (ptime)) is in 60ths of a second --
;; TODO confirm the tick rate on this system.
(defun runtime () (quotient (car (ptime)) 60.0))

;; Run the FFT n times and report total and per-iteration CPU seconds.
(defun try (n) 
(prog (t0)
  (setq t0 (runtime))
  (do ((ntimes 0 (1+ ntimes)))
      ((= ntimes n))
      (fft re im))
  (setq t0 (difference (runtime) t0))
  (terpri)(terpri)
  (princ "total time = ")(princ t0)
  (princ "   for ")(princ n)(princ " iterations.")
  (terpri)
  (princ "average time = ")(princ (quotient t0 n))(princ " per iteration.")
  (terpri)(terpri)))
..............C code follows ...........

#include <math.h>
#include <stdio.h>

#define PI	3.141592653589793

fft (ar, ai, nn)
double ar[], ai[];
int *nn;
{
int n, m, nv2, le, r, k;
register int i,j, ip, l , le1; 
double tr, ti, ur, ui, wi, wr, angle, cos(), sin();
 
n = *nn - 1 ;

/*compute m = log(n)*/
m = 0;
for (i=1; i<n; i+=i)
	m++;

if (n != i) /* i = 2↑m */
   {printf("error...array size not a power of 2.\n"); return(0);}

nv2 = n/2;
j = 1;    /*interchange elements */
i = 1;    /*in bit-reversed order */

for (i=1; i<n; i++) {
	if (i < j) { 
		tr = ar[j]; ar[j] = ar[i]; ar[i] = tr;
		ti = ai[j]; ai[j] = ai[i]; ai[i] = ti;
		}

	for (k=nv2; k<j; k /= 2)
		j -= k;
	j += k;
	}

le = 1;
for (l = 1; l <= m; l++) {
	le += le; le1 = le/2;
	ur = 1.0; ui = 0.0;

	angle = PI/le1;
	wr = cos(angle); wi = sin(angle);
	for (j = 1; j <= le1; j++) { 
		for (i=j; i <= n; i += le) {
			ip = i + le1;
			tr = ar[ip]*ur - ai[ip]*ui;
			ti = ar[ip]*ui + ai[ip]*ur;
			ar[ip] = ar[i] - tr;
			ai[ip] = ai[i] - ti;
			ar[i]  = ar[i] + tr;
			ai[i]  = ai[i] + ti;
			}
		tr = ur*wr - ui*wi;
		ti = ur*wi + ui*wr;
		ur = tr;
		ui = ti;
		}
	}

return(0);
}
/* note that the code here has not been hacked to use pointers instead of
arrays; something some people claim will make another 40% speed-up.  The
clarity of the code is, I think, superior to Lisp, though of course it
had to be figured out, debugged, etc. in a less congenial environment. */ 

∂06-May-82  2009	Kim.fateman at Berkeley 	that fft program in C    
Date: 6 May 1982 20:04:40-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: that fft program in C
Cc: Kim.jkf@Berkeley

seems to take about 0.89-0.91 seconds on a 750, no floating point accelerator.

∂06-May-82  1646	Kim.fateman at Berkeley 	fft benchmark  
Date: 6 May 1982 16:38:46-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: fft benchmark
Cc: Kim.jkf@Berkeley

We were sent a copy of the fft benchmark earlier, and recoded it
in "C", just to see what that entailed.  Basically, a day or two
of figuring out how to write a program in "C" (something I am not
fluent in).
  The times seem to be about 0.58 to 0.60 seconds, on a vax 11/780
without floating point accelerator, using 64-bit floating point.
  The totally lisp times are much slower, and presumably will be
run by others (e.g. the suppliers of the benchmark); and perhaps
done here too.  For your edification, a subsequent message will
have both the lisp code and the fft (C) code.

∂25-May-82  0907	jkf@ucbkim at Berkeley 	frpoly benchmark, complete results  
Date: 25 May 1982 09:03:10-PDT
From: jkf@ucbkim at Berkeley
Mail-From: ucbkim received by UCBVAX at 25-May-82 08:46:04-PDT (Tue)
Date: 25-May-82 08:52:21-PDT (Tue)
From: jkf@ucbkim
Subject: frpoly benchmark, complete results
Via: ucbkim.EtherNet (V3.100 [3/27/82]); 25-May-82 08:52:27-PDT (Tue)
Via: ucbvax.EtherNet (V3.100 [3/27/82]); 25-May-82 08:46:04-PDT (Tue)
To: rpg@su-ai
Cc: jkf@fateman@Berkeley

frpoly benchmark			-[Tue May 25 08:51:42 1982 by jkf]-
Results for Franz Lisp Opus 38.18, and compiler Liszt 8.07

			11/780
Slow links:
(power= 2 (0.05 0.0) (0.06666666666666667 0.0) (0.06666666666666667 0.0))
(power= 5 (0.6 0.0) (1.18333 0.4166666666666667) (0.9833333333333333 0.4))
(power= 10 (7.6666667 0.8833) (14.866667 3.83333) (8.6166667 1.433333333333))
(power= 15 (52.51666666666667 5.316666667) (132.8 43.0) (54.2666667 6.95))

Normal links:
(power= 2 (0.01666667 0.0) (0.01666666667 0.0) (0.03333333333333333 0.0))
(power= 5 (0.2 0.0) (0.3833333333333333 0.0) (0.2166666666666667 0.0))
(power= 10 (2.333333333333333 0.0) (8.0 2.266666666666667) (2.6 0.0))
(power= 15 (20.6 4.016666666667) (86.5 30.3833333) (25.0833333 6.766666667))

Local Functions:
(power= 2 (0.01666666667 0.0) (0.016666667 0.0) (0.01666666666666667 0.0))
(power= 5 (0.15 0.0) (0.66666667 0.36666667) (0.51666667 0.3333333333333333))
(power= 10 (2.6 0.8333333333333333) (8.866667 3.7) (3.85 1.833333333333333))
(power= 15 (18.48333333 5.6333333) (93.866667 41.75) (21.7 6.966666666667))

 			11/750
Slow links:
(power= 2 (0.0833333333333 0.0) (0.116666666667 0.0) (0.08333333333333 0.0))
(power= 5 (0.933333333 0.0) (1.8 0.56666666667) (1.6166666667 0.633333333333))
(power= 10 (12.06666667 1.3666666667) (22.26667 5.93333) (13.86667 2.8833333))
(power= 15 (82.5666667 9.3833333) (202.683333 67.46666667) (86.83333 11.2167))

Normal links:
(power= 2 (0.05 0.0) (0.03333333333333333 0.0) (0.03333333333333333 0.0))
(power= 5 (0.35 0.0) (1.18333333 0.566667) (1.0166667 0.6666666666666667))
(power= 10 (5.3666667 1.383333) (14.95 5.816667) (7.266666666666667 2.95))
(power= 15 (37.233333 9.1166667) (155.45 67.15) (41.46666666666667 11.0))

Local functions:
(power= 2 (0.0333333 0.0) (0.03333333333333333 0.0) (0.01666666666666667 0.0))
(power= 5 (0.2666666666666667 0.0) (0.4833333333333333 0.0) (0.3 0.0))
(power= 10 (3.15 0.0) (11.81666666666667 3.516666666666667) (3.466666666666667 0.0))
(power= 15 (29.46666666666667 6.65) (129.5666666666667 48.21666666666667) (36.7 11.21666666666667))nil


Slow links means (sstatus translink nil)
Fast links means (sstatus translink t)
Local functions means that certain functions are declared to be local
  to this file.



∂22-May-82  2336	Martin.Griss <Griss at UTAH-20> 	MAS times   
Date: 23 May 1982 0004-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: MAS times
To: rpg at SU-AI
cc: griss at UTAH-20

ON version 3 PSL:
DEC-20, 2.568 secs
VAX/750 11.59 secs

Will look at see if I missed anything. What do you expect?
-------

∂21-May-82  2048	Martin.Griss <Griss at UTAH-20> 	Latest PSL Tak times  
Date: 21 May 1982 2115-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: Latest PSL Tak times
To: rpg at SU-AI
cc: griss at UTAH-20

We are now running V3 PSL on all machines (20, VAX and Apollo).
I attach following times, tho am surprised that ARITH time went up.
May indicate other opts slipped in recent builds.
Latest PSL Times for TAK (the older JMC TAK)
As of 9:08pm  Friday, 21 May 1982
Note that in V3 PSL, we have new tagging scheme (0 for POSINT, -1 for
NEGINT, so INUM times replace==are SYSLISP times]

For some reason, Generic ARITH times went up (?) will check.

Utah-20: PSL (TAKF, Inum)	443 ms
         PSL (generic arith)   1936 ms       [Was 1.672, need test INUM first?]
         Fast Link LISP 1.6     990 ms

VAX-11/750:
	PSL  (TAKF,Inum)       1292 ms
        PSL   (generic)        7344 ms

Apollo/Domain
	PSL  (TAKF, Inum)      2932 ms
-------

∂02-Jul-82  0007	RPG  	Lack of Response   
To:   lisptranslators at SU-AI   
I have not been receiving many timings for the various benchmarks I
have sent out. In particular I have few responses from the various
Interlisps and the Lisp Machines; the most diligent people have
been Mabry Tyson, Martin Griss, and Chuck Hedrick. I have access to
Interlisp on a 2060, a Vax, and a Dolphin; I can get access to LM-2's
at Fairchild and Symbolics in Palo Alto. So, if you prefer - this is a
threat - I can do these timings (badly, inaccurately, and with bias
born of no interest) myself. I would like to get some more results for
the Lisp conference in August, so please bear down. And if you have any
benchmarks you think will embarrass the competition send them along.
			-rpg-

∂06-Jul-82  1539	RPG  	Symbolic Derivative Benchmark
To:   lisptranslators at SU-AI   
Here is a symbolic derivative benchmark that Vaughn Pratt wrote. It uses
a simple subset of Lisp and does a *lot* of CONSing. Below is the code
for that benchmark; please refer to it as DERIV:

(DECLARE (MAPEX T))	;This makes MAPCAR open-code

;;; DER1 builds the u'/u term used by the TIMES (product) rule.
(DEFUN DER1 (A) (LIST 'QUOTIENT (DERIV A) A))

;;; DERIV: symbolic derivative of expression A with respect to X.
;;; Atoms: X differentiates to 1, any other atom to 0.
;;; PLUS/DIFFERENCE: differentiate each argument.
;;; TIMES: product rule in logarithmic form, a * (sum of u'/u).
;;; QUOTIENT: builds (u'/v) - u/(v*v*v').  NOTE(review): this differs
;;; from the usual quotient rule (u*v'/(v*v)); kept as written -- the
;;; benchmark measures consing and the result is discarded.
;;; Unknown operators yield the atom ERROR.
(DEFUN DERIV (A)
	(COND 
	 ((ATOM A)
	  (COND ((EQ A 'X) 1) (T 0)))
	 ((EQ (CAR A) 'PLUS)	(CONS 'PLUS (MAPCAR 'DERIV (CDR A))))
	 ((EQ (CAR A) 'DIFFERENCE) 
	  (CONS 'DIFFERENCE (MAPCAR 'DERIV 
				    (CDR A))))
	 ((EQ (CAR A) 'TIMES)
	  (LIST 'TIMES 
		A 
		(CONS 'PLUS (MAPCAR 'DER1 (CDR A)))))
	 ((EQ (CAR A) 'QUOTIENT)
	  (LIST 'DIFFERENCE 
		(LIST 'QUOTIENT 
		      (DERIV (CADR A)) 
		      (CADDR A))
		(LIST 'QUOTIENT 
		      (CADR A) 
		      (LIST 'TIMES
			    (CADDR A)
			    (CADDR A)
			    (DERIV (CADDR A))))))
	 (T 'ERROR)))

;;; Benchmark driver.
(DEFUN RUN ()
 (DECLARE (FIXNUM I))	;Improves the code a little
 (DO ((I 0 (1+ I)))
     ((= I 1000.))	;1000 iterations x 5 calls = 5000 derivatives
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))))

Here is a sample run and some times from SAIL using MacLisp:

(fasload deriv)
(timit)

Timing performed on Tuesday 07/06/82 at 15:17:55.
Cpu Time = 2.091			;CPU seconds
Elapsed Time = 152.083334		;real time in seconds
Wholine Time = 38.55			;CPU plus memory waits
GC Time = 19.436			;GC time in seconds
Load Average Before  = 1.4194169	;SAIL load average
Load Average After   = 3.0533738
Average Load Average = 2.23639536

The next 2 benchmarks will be variants of this.
			-rpg-

∂06-Jul-82  1605	RPG  	Symbolic Derivative (2) 
To:   lisptranslators at SU-AI   
This benchmark is a variant of the simple symbolic derivative
program I sent recently. The main change is that it is `table-driven.'
Instead of a large COND that branches on the CAR of the expression, this
program finds the code that will take the derivative on the property list
of the atom in the CAR position. So, when the expression is (PLUS . <rest>),
the code stored under the atom PLUS with indicator DERIV will take <rest>
and return the derivative for PLUS. The way that MacLisp does this is with
the special form: (DEFUN (FOO BAR) ...). This is exactly like DEFUN with
an atomic name in that it expects an argument list and the compiler compiles
code, but the code is stored on the property list of FOO under the indicator BAR,
in this case. You may have to do something like:

(DEFUN DPLUS (A) ...)
(PUTPROP 'PLUS (GETF 'DPLUS) 'DERIV)

where GETF gets the functional value of DPLUS. In MacLisp this would be:
(DEFUN GETF (X)(GET X 'SUBR))

Here is the code:

(DECLARE (MAPEX T))	;causes MAPCAR's to open-code

;;; DER1 builds the u'/u term used by the TIMES (product) rule.
(DEFUN DER1 (A) (LIST 'QUOTIENT (DERIV A) A))

;;; Table-driven handlers: (DEFUN (OP DERIV) ...) stores the definition
;;; on OP's property list under the DERIV indicator.  Each handler
;;; receives A = the operator's argument list (the CDR of the form).

(DEFUN (PLUS DERIV) (A)
       (CONS 'PLUS (MAPCAR 'DERIV A)))

(DEFUN (DIFFERENCE DERIV) (A)
       (CONS 'DIFFERENCE (MAPCAR 'DERIV 
				 A)))

;;; NOTE(review): A is the bare argument list, so the rebuilt product
;;; is (TIMES (arg1 arg2 ...) ...), not (TIMES (TIMES arg1 ...) ...)
;;; as the plain DERIV benchmark produces.  The result is discarded by
;;; the driver, so this is preserved as the benchmark's behavior.
(DEFUN (TIMES DERIV) (A)
       (LIST 'TIMES A 
	     (CONS 'PLUS (MAPCAR 'DER1 A))))

;;; Quotient: builds (u'/v) - u/(v*v*v').  NOTE(review): differs from
;;; the usual quotient rule (u*v'/(v*v)); kept as written.
(DEFUN (QUOTIENT DERIV) (A)
       (LIST 'DIFFERENCE 
	     (LIST 'QUOTIENT 
		   (DERIV (CAR A)) 
		   (CADR A))
	     (LIST 'QUOTIENT 
		   (CAR A) 
		   (LIST 'TIMES
			 (CADR A)
			 (CADR A)
			 (DERIV (CADR A))))))

;;; FUNCALL (for the 1 argument case) can be defined as:
;;;	(DEFUN FUNCALL (F X)
;;;	 	(APPLY F (NCONS X)))
;;;
;;; Using macros FUNCALL is (in the general case):
;;;  	(DEFMACRO FUNCALL (F . X)
;;;		`(APPLY ,F (LIST . ,X)))

;;; Dispatcher: atoms differentiate to 1 (X) or 0 (constants);
;;; otherwise fetch the handler from the operator's DERIV property and
;;; FUNCALL it on the argument list.  The LET variable DERIV holds the
;;; property value; it does not interfere with the function DERIV.
 (DEFUN DERIV (A)
	(COND 
	 ((ATOM A)
	  (COND ((EQ A 'X) 1) (T 0)))
	 (T (LET ((DERIV (GET (CAR A) 'DERIV)))
		 (COND (DERIV (FUNCALL DERIV (CDR A)))
		       (T 'ERROR))))))

;;; Driver: 1000 DO iterations x 5 calls = 5000 derivative computations.
(DEFUN RUN ()
 (DECLARE (FIXNUM I))
 (DO ((I 0 (1+ I)))
     ((= I 1000.))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))))

Here is a sample run on SAIL in MacLisp:
(fasload dderiv)
(timit)

Timing performed on Tuesday 07/06/82 at 16:01:46.
Cpu Time = 3.12
Elapsed Time = 66.9
Wholine Time = 39.233333
GC Time = 18.734
Load Average Before  = 1.03375137
Load Average After   = 1.3326118
Average Load Average = 1.18318158

Refer to this benchmark as DDERIV.
			-rpg-

∂06-Jul-82  1613	RPG  	Symbolic Derivative (2) 
To:   lisptranslators at SU-AI   
Note: the description that I sent for this benchmark was in error;
please use this instead. You can simply ignore the previous message.
I was describing another variant of DEFUN, which is used in the third
of the derivative benchmarks.

This benchmark is a variant of the simple symbolic derivative program I
sent recently. The main change is that it is `table-driven.'  Instead of a
large COND that branches on the CAR of the expression, this program finds
the code that will take the derivative on the property list of the atom in
the CAR position. So, when the expression is (PLUS . <rest>), the code
stored under the atom PLUS with indicator DERIV will take <rest> and
return the derivative for PLUS. The way that MacLisp does this is with the
special form: (DEFUN (FOO BAR) ...). This is exactly like DEFUN with an
atomic name in that it expects an argument list and the compiler compiles
code, but the name of the function with that code is stored on the
property list of FOO under the indicator BAR, in this case. You may have
to do something like:

(DEFUN DPLUS (A) ...)
(PUTPROP 'PLUS 'DPLUS 'DERIV)

Here is the code:

(DECLARE (MAPEX T))	;causes MAPCAR's to open-code

;;; DER1 builds the u'/u term used by the TIMES (product) rule.
(DEFUN DER1 (A) (LIST 'QUOTIENT (DERIV A) A))

;;; Table-driven handlers: (DEFUN (OP DERIV) ...) stores the definition
;;; on OP's property list under the DERIV indicator.  Each handler
;;; receives A = the operator's argument list (the CDR of the form).

(DEFUN (PLUS DERIV) (A)
       (CONS 'PLUS (MAPCAR 'DERIV A)))

(DEFUN (DIFFERENCE DERIV) (A)
       (CONS 'DIFFERENCE (MAPCAR 'DERIV 
				 A)))

;;; NOTE(review): A is the bare argument list, so the rebuilt product
;;; is (TIMES (arg1 arg2 ...) ...), not (TIMES (TIMES arg1 ...) ...)
;;; as the plain DERIV benchmark produces.  The result is discarded by
;;; the driver, so this is preserved as the benchmark's behavior.
(DEFUN (TIMES DERIV) (A)
       (LIST 'TIMES A 
	     (CONS 'PLUS (MAPCAR 'DER1 A))))

;;; Quotient: builds (u'/v) - u/(v*v*v').  NOTE(review): differs from
;;; the usual quotient rule (u*v'/(v*v)); kept as written.
(DEFUN (QUOTIENT DERIV) (A)
       (LIST 'DIFFERENCE 
	     (LIST 'QUOTIENT 
		   (DERIV (CAR A)) 
		   (CADR A))
	     (LIST 'QUOTIENT 
		   (CAR A) 
		   (LIST 'TIMES
			 (CADR A)
			 (CADR A)
			 (DERIV (CADR A))))))

;;; FUNCALL (for the 1 argument case) can be defined as:
;;;	(DEFUN FUNCALL (F X)
;;;	 	(APPLY F (NCONS X)))
;;;
;;; Using macros FUNCALL is (in the general case):
;;;  	(DEFMACRO FUNCALL (F . X)
;;;		`(APPLY ,F (LIST . ,X)))

;;; Dispatcher: atoms differentiate to 1 (X) or 0 (constants);
;;; otherwise fetch the handler from the operator's DERIV property and
;;; FUNCALL it on the argument list.  The LET variable DERIV holds the
;;; property value; it does not interfere with the function DERIV.
 (DEFUN DERIV (A)
	(COND 
	 ((ATOM A)
	  (COND ((EQ A 'X) 1) (T 0)))
	 (T (LET ((DERIV (GET (CAR A) 'DERIV)))
		 (COND (DERIV (FUNCALL DERIV (CDR A)))
		       (T 'ERROR))))))

;;; Driver: 1000 DO iterations x 5 calls = 5000 derivative computations.
(DEFUN RUN ()
 (DECLARE (FIXNUM I))
 (DO ((I 0 (1+ I)))
     ((= I 1000.))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))))

Here is a sample run on SAIL in MacLisp:
(fasload dderiv)
(timit)

Timing performed on Tuesday 07/06/82 at 16:01:46.
Cpu Time = 3.12
Elapsed Time = 66.9
Wholine Time = 39.233333
GC Time = 18.734
Load Average Before  = 1.03375137
Load Average After   = 1.3326118
Average Load Average = 1.18318158

Refer to this benchmark as DDERIV.
			-rpg-

∂06-Jul-82  1630	RPG  	Symbolic Derivative (3) 
To:   lisptranslators at SU-AI   
This is the third in the series of symbolic derivative benchmarks. It uses
a faster variant of the table-driven idea used in DDERIV. In that benchmark,
the *name* of the function that took the derivative was stored under the
indicator DERIV on the property list for the function that is being 
differentiated. Here we put the actual code on that property list. In the MacLisp
case, a code pointer is placed there, and a special code-pointer applier is
used. First, consider the form (DEFUN (PLUS DERIV DERIV) ...). What this
accomplishes (for our purpose) is to put the code pointer for the defined
function on the property list of PLUS under the indicator DERIV. This is
equivalent in effect to:

(DEFUN DPLUS ...)
(PUTPROP 'PLUS (GETF 'DPLUS) 'DERIV)
(REMF 'DPLUS)

where GETF gets the code pointer and REMF flushes it. In MacLisp
these could be defined as:

(DEFUN GETF (X)(GET X 'SUBR))
(DEFUN REMF (X)(REMPROP X 'SUBR))

The funny repeated reference to DERIV (as in (DEFUN (PLUS DERIV DERIV)...))
							    ↑     ↑
has to do with interpreter versus compiler behavior.

SUBRCALL is like FUNCALL but takes a code pointer instead. In the
call, (SUBRCALL T DERIV (CDR A)), the `T' means that the function
returns a pointer rather than a FIXNUM (for example).
Using macros FUNCALL is (in the general case):

(DEFMACRO FUNCALL (F . X)
	  `(APPLY ,F (LIST . ,X)))

Here is the code:

(DECLARE (MAPEX T))	;Causes MAPCAR to open-code

(DEFUN DER1 (A) (LIST 'QUOTIENT (DERIV A) A))

;;; Sum rule: d(u+v+...) = du+dv+...  The property-DEFUN form
;;; (DEFUN (PLUS DERIV DERIV) ...) stores this function's code pointer
;;; under the DERIV property of PLUS, for dispatch via SUBRCALL.
(DEFUN (PLUS DERIV DERIV) (A)
       (CONS 'PLUS (MAPCAR 'DERIV A)))

;;; Difference rule: d(u-v-...) = du-dv-...  Stored on DIFFERENCE's
;;; DERIV property, same mechanism as the PLUS rule.
(DEFUN (DIFFERENCE DERIV DERIV) (A)
       (CONS 'DIFFERENCE (MAPCAR 'DERIV 
				 A)))

;;; Product rule via logarithmic derivative:
;;;   d(u*v*...) = (u*v*...) * (du/u + dv/v + ...).
;;; NOTE(review): as written the second element is the bare argument
;;; list A; a 13-Jul-82 correction later in this archive replaces it
;;; with (CONS 'TIMES A).  Left as originally sent.
(DEFUN (TIMES DERIV DERIV) (A)
       (LIST 'TIMES A 
	     (CONS 'PLUS (MAPCAR 'DER1 A))))

;;; Quotient rule for the binary case: numerator = (CAR A),
;;; denominator = (CADR A).  Builds du/v - u/(v*v*dv).
;;; NOTE(review): the second term is u/(v*v*dv), not (u*dv)/(v*v);
;;; this matches the DDERIV version earlier in this archive, so it is
;;; presumably intentional for timing purposes — confirm before reuse.
(DEFUN (QUOTIENT DERIV DERIV) (A)
       (LIST 'DIFFERENCE 
	     (LIST 'QUOTIENT 
		   (DERIV (CAR A)) 
		   (CADR A))
	     (LIST 'QUOTIENT 
		   (CAR A) 
		   (LIST 'TIMES
			 (CADR A)
			 (CADR A)
			 (DERIV (CADR A))))))

 ;;; FDDERIV variant of DERIV: the DERIV property now holds an actual
 ;;; code pointer (installed by the property-DEFUNs above), applied
 ;;; with SUBRCALL.  Per the message text, the T argument declares that
 ;;; the SUBR returns a pointer value rather than a FIXNUM.
 (DEFUN DERIV (A)
	(COND 
	 ((ATOM A)
	  (COND ((EQ A 'X) 1) (T 0)))
	 (T (LET ((DERIV (GET (CAR A) 'DERIV)))
		 (COND (DERIV (SUBRCALL T DERIV (CDR A)))
		       (T 'ERROR))))))

;;; Benchmark driver, identical to the DDERIV driver: 5000 total
;;; DERIV calls (1000 loop iterations x 5 calls) on a fixed quartic.
;;; I is declared FIXNUM for the compiler.
(DEFUN RUN ()
 (DECLARE (FIXNUM I))
 (DO ((I 0 (1+ I)))
     ((= I 1000.))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))
     (DERIV '(PLUS (TIMES 3 X X) (TIMES A X X) (TIMES B X) 5))))

Here is a sample run on SAIL using MacLisp:

(fasload fdderiv)
(timit)

Timing performed on Tuesday 07/06/82 at 16:27:15.
Cpu Time = 2.375
Elapsed Time = 115.166667
Wholine Time = 38.5333333
GC Time = 18.393
Load Average Before  = 1.38997114
Load Average After   = 1.99293315
Average Load Average = 1.69145215

Refer to this benchmark as FDDERIV.
			-rpg-

∂06-Jul-82  1634	RPG  	Progress 
To:   lisptranslators at SU-AI   
Here is my chart of who has done which benchmarks. Please note
where you stand and get those results in. I can retransmit any
benchmarks you have lost.

Who has done what:

	    | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
rpg	    |u |u |u |u |u |u |  |  |  |u |  |  |  | 
sail(McLsp) | X| X| X| X| X| X| X| X| X| X|  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
rpg	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
score(McLsp)|  | X|  |  | X|  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
rpg	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
F2(McLsp)   | X| X| X| X| X| X|  |  |  | X|  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Tyson	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(UTLISP)  | X|  | X|  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Tyson	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(UCILISP) |  |  | X| X|  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Hedrick	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(UCILISP) | X| X|  |  |  | X|  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Hedrick	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(ELISP)   | X|  | X| X|  | X|  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
JKF	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
780(fr)	    | X| X| X| X|  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
JKF	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
750(fr)	    |  | X| X| X|  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
LMM	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
Dolph	    |  | X|  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
LMM	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
10(InterLsp)|  |  |  |  |  |  |  |  |  | X|  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
GJC	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
780(NIL)    |  | X|  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
LMM	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
Dorado	    |  | X|  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Griss	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(PSL)     |  | X|  | X|  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Griss	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
750(PSL)    |  | X|  | X|  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Griss	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
Apollo(PSL) |  | X|  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
HIC	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
LM-2	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
ROD	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
CADR	    |  | X|  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Greenberg   |  |  |  |  |  |  |  |  |  |  |  |  |  | 
Multics	    | X|  | X|  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
rpg         |  |  |  |  |  |  |  |  |  |  |  |  |  | 
S1     	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Fahlman     |  |  |  |  |  |  |  |  |  |  |  |  |  | 
SPICELISP   |  |  |  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
CMU         |  |  |  |  |  |  |  |  |  |  |  |  |  | 
780(CL)     |  |  |  |  |  |  |  |  |  |  |  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------

Notes. `u' means uncached times were taken as well.

1. SCCPP
2. TAK
3. FRPOLY
4. TAKL
5. TAKR
6. PUZZLE
7. DERIV
8. DDERIV
9. FDERIV
10. FFT

∂06-Jul-82  1703	Kim.fateman at Berkeley 	Re:  Progress  
Date: 6 Jul 1982 16:49:48-PDT
From: Kim.fateman at Berkeley
To: RPG@SU-AI
Subject: Re:  Progress
Cc: Kim.jkf@Berkeley

I just ran DERIV; Franz Lisp opus 38.20 on a vax 11/780 running with
a load average of between .5 and .8  (light).  Recorded time 27 seconds
of which 16 were in garbage collection.
The only change to the file was to declare (localf der1 deriv).

jkf: get back to your thesis.

∂06-Jul-82  1724	ARPAVAX.fateman at Berkeley   
Date: 6 Jul 1982 17:02:05-PDT
From: ARPAVAX.fateman at Berkeley
To: rpg@su-ai
Cc: ARPAVAX.jkf@Berkeley

dderiv under the same conditions, 30.8 sec, 17.28 in GC.

∂06-Jul-82  1724	Kim.fateman at Berkeley 	benchmark 3    
Date: 6 Jul 1982 17:18:10-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: benchmark 3

(frpoly)  was translated into interlisp (initially by me, and
then hacked a little by dave dyer).  Its code follows in this
message.  I suggest you relay it to interested interlisp parties
if you want timings.
(I thought I sent you a copy previously).
.......

(FILECREATED "20-Feb-82 19:42:04" ε∧<DDYER>IPOLY..13ε↓ 6186   

     previous date: "20-Feb-82 19:36:45" <DDYER>IPOLY..11)


(PRETTYCOMPRINT IPOLYCOMS)

(RPAQQ εαIPOLYCOMSε↓ ((DECLARE: DONTEVAL@LOAD DOEVAL@COMPILE DONTCOPY
			    (P (SPECVARS ANS COEF F INC I K QQ SS V *X* *ALPHA *A* *B* *CHK *L *P Q* 
					 U* *VAR *Y* R R2 R3 START RES1 RES2 RES3)))
	(FNS PCOEFADD PCPLUS PCPLUS1 PPLUS PTIMES PTIMES1 PTIMES2 PTIMES3 PSIMP PCTIMES PCTIMES1 
	     PEXPTSQ PPLUS1 BENCH ODDP SETUP)
	(MACROS * IPOLYMACROS)))
(DECLARE: DONTEVAL@LOAD DOEVAL@COMPILE DONTCOPY 
(SPECVARS ANS COEF F INC I K QQ SS V *X* *ALPHA *A* *B* *CHK *L *P Q* U* *VAR *Y* R R2 R3 START RES1 
	  RES2 RES3)
)
(DEFINEQ

(ε∧PCOEFADDε↓
  [LAMBDA (E C X)
    (COND
      ((PZEROP C)
	X)
      (T (CONS E (CONS C X])

(ε∧PCPLUSε↓
  [LAMBDA (C P)
    (COND
      ((PCOEFP P)
	(CPLUS P C))
      (T (εαPSIMPε↓ (CAR P)
		(εαPCPLUS1ε↓ C (CDR P])

(ε∧PCPLUS1ε↓
  [LAMBDA (C X)
    (COND
      [(NULL X)
	(COND
	  ((PZEROP C)
	    NIL)
	  (T (CONS 0 (CONS C NIL]
      ((PZEROP (CAR X))
	(εαPCOEFADDε↓ 0 (εαPPLUSε↓ C (CADR X))
		  NIL))
      (T (CONS (CAR X)
	       (CONS (CADR X)
		     (εαPCPLUS1ε↓ C (CDDR X])

(ε∧PPLUSε↓
  [LAMBDA (X Y)
    (COND
      ((PCOEFP X)
	(εαPCPLUSε↓ X Y))
      ((PCOEFP Y)
	(εαPCPLUSε↓ Y X))
      [(EQ (CAR X)
	   (CAR Y))
	(εαPSIMPε↓ (CAR X)
	       (εαPPLUS1ε↓ (CDR Y)
		       (CDR X]
      [(POINTERGP (CAR X)
		  (CAR Y))
	(εαPSIMPε↓ (CAR X)
	       (εαPCPLUS1ε↓ Y (CDR X]
      (T (εαPSIMPε↓ (CAR Y)
		(εαPCPLUS1ε↓ X (CDR Y])

(ε∧PTIMESε↓
  [LAMBDA (X Y)
    (COND
      ((OR (PZEROP X)
	   (PZEROP Y))
	(PZERO))
      ((PCOEFP X)
	(εαPCTIMESε↓ X Y))
      ((PCOEFP Y)
	(εαPCTIMESε↓ Y X))
      [(EQ (CAR X)
	   (CAR Y))
	(εαPSIMPε↓ (CAR X)
	       (εαPTIMES1ε↓ (CDR X)
			(CDR Y]
      [(POINTERGP (CAR X)
		  (CAR Y))
	(εαPSIMPε↓ (CAR X)
	       (εαPCTIMES1ε↓ Y (CDR X]
      (T (εαPSIMPε↓ (CAR Y)
		(εαPCTIMES1ε↓ X (CDR Y])

(ε∧PTIMES1ε↓
  [LAMBDA (*X* Y)
    (PROG (U* V)
          (SETQ V (SETQ U*(εαPTIMES2ε↓ Y)))
      A   (SETQ *X*(CDDR *X*))
          (COND
	    ((NULL *X*)
	      (RETURN U*)))
          (εαPTIMES3ε↓ Y)
          (GO A])

(ε∧PTIMES2ε↓
  [LAMBDA (Y)
    (COND
      ((NULL Y)
	NIL)
      (T (εαPCOEFADDε↓ (PLUS (CAR *X*)
			 (CAR Y))
		   (εαPTIMESε↓ (CADR *X*)
			   (CADR Y))
		   (εαPTIMES2ε↓ (CDDR Y])

(ε∧PTIMES3ε↓
  [LAMBDA (Y)
    (PROG (E U C)
      A1  (COND
	    ((NULL Y)
	      (RETURN NIL)))
          (SETQ E (IPLUS (CAR *X*)
			 (CAR Y)))
          (SETQ C (εαPTIMESε↓ (CADR Y)
			  (CADR *X*)))
          (COND
	    ((PZEROP C)
	      (SETQ Y (CDDR Y))
	      (GO A1))
	    ((OR (NULL V)
		 (IGREATERP E (CAR V)))
	      [SETQ U*(SETQ V (εαPPLUS1ε↓ U*(LIST E C]
	      (SETQ Y (CDDR Y))
	      (GO A1))
	    ((IEQP E (CAR V))
	      (SETQ C (εαPPLUSε↓ C (CADR V)))
	      (COND
		[(PZEROP C)
		  (SETQ U*(SETQ V (PDIFFER1 U*(LIST (CAR V)
						    (CADR V]
		(T (RPLACA (CDR V)
			   C)))
	      (SETQ Y (CDDR Y))
	      (GO A1)))
      A   (COND
	    ((AND (CDDR V)
		  (IGREATERP (CADDR V)
			     E))
	      (SETQ V (CDDR V))
	      (GO A)))
          (SETQ U (CDR V))
      B   (COND
	    ((OR (NULL (CDR U))
		 (ILESSP (CADR U)
			 E))
	      [RPLACD U (CONS E (CONS C (CDR U]
	      (GO E)))
          (COND
	    ((PZEROP (SETQ C (εαPPLUSε↓ (CADDR U)
				    C)))
	      (RPLACD U (CDDDR U))
	      (GO D))
	    (T (RPLACA (CDDR U)
		       C)))
      E   (SETQ U (CDDR U))
      D   (SETQ Y (CDDR Y))
          (COND
	    ((NULL Y)
	      (RETURN NIL)))
          (SETQ E (IPLUS (CAR *X*)
			 (CAR Y)))
          (SETQ C (εαPTIMESε↓ (CADR Y)
			  (CADR *X*)))
      C   (COND
	    ((AND (CDR U)
		  (IGREATERP (CADR U)
			     E))
	      (SETQ U (CDDR U))
	      (GO C)))
          (GO B])

(ε∧PSIMPε↓
  [LAMBDA (VAR X)
    (COND
      ((NULL X)
	0)
      ((ATOM X)
	X)
      ((ZEROP (CAR X))
	(CADR X))
      (T (CONS VAR X])

(ε∧PCTIMESε↓
  [LAMBDA (C P)
    (COND
      ((PCOEFP P)
	(CTIMES C P))
      (T (εαPSIMPε↓ (CAR P)
		(εαPCTIMES1ε↓ C (CDR P])

(ε∧PCTIMES1ε↓
  [LAMBDA (C X)
    (COND
      ((NULL X)
	NIL)
      (T (εαPCOEFADDε↓ (CAR X)
		   (εαPTIMESε↓ C (CADR X))
		   (εαPCTIMES1ε↓ C (CDDR X])

(ε∧PEXPTSQε↓
  [LAMBDA (P N)
    (PROG (S)
          (SETQ S (COND
	      ((εαODDPε↓ N)
		P)
	      (T 1)))
          (SETQ N (QUOTIENT N 2))
      LOOP(COND
	    ((ZEROP N)
	      (RETURN S)))
          (SETQ P (εαPTIMESε↓ P P))
          (AND (εαODDPε↓ N)
	       (SETQ S (εαPTIMESε↓ S P)))
          (SETQ N (QUOTIENT N 2))
          (GO LOOP])

(ε∧PPLUS1ε↓
  [LAMBDA (X Y)
    (COND
      ((NULL X)
	Y)
      ((NULL Y)
	X)
      [(IEQP (CAR X)
	     (CAR Y))
	(εαPCOEFADDε↓ (CAR X)
		  (εαPPLUSε↓ (CADR X)
			 (CADR Y))
		  (εαPPLUS1ε↓ (CDDR X)
			  (CDDR Y]
      [(IGREATERP (CAR X)
		  (CAR Y))
	(CONS (CAR X)
	      (CONS (CADR X)
		    (εαPPLUS1ε↓ (CDDR X)
			    Y]
      (T (CONS (CAR Y)
	       (CONS (CADR Y)
		     (εαPPLUS1ε↓ X (CDDR Y])

(ε∧BENCHε↓
  [LAMBDA (N)
    (TIME (εαPEXPTSQε↓ R N)
	  1 3])

(ε∧ODDPε↓
  [LAMBDA (X)
    (EQP (REMAINDER X 2)
	 1])

(ε∧SETUPε↓
  [LAMBDA NIL
    (PUTPROP (QUOTE X)
	     (QUOTE ORDER)
	     1)
    (PUTPROP (QUOTE Y)
	     (QUOTE ORDER)
	     2)
    (PUTPROP (QUOTE Z)
	     (QUOTE ORDER)
	     3)
    [SETQ R (εαPPLUSε↓ (QUOTE (X 1 1 0 1))
		   (εαPPLUSε↓ (QUOTE (Y 1 1))
			  (QUOTE (Z 1 1]
    (SETQ R2 (εαPTIMESε↓ R 100000))
    (SETQ R3 (εαPTIMESε↓ R 1.0])
)

(RPAQQ εαIPOLYMACROSε↓ (CPLUS CTIMES PCOEFP POINTERGP PZERO PZEROP))
(DECLARE: EVAL@COMPILE 

(PUTPROPS εαCPLUS MACROε↓ [LAMBDA (X Y)
			(PLUS X Y])

(PUTPROPS εαCTIMES MACROε↓ [LAMBDA (X Y)
			 (TIMES X Y])

(PUTPROPS εαPCOEFP MACROε↓ [LAMBDA (E)
			 (ATOM E])

(PUTPROPS εαPOINTERGP MACROε↓ [LAMBDA (X Y)
			    (IGREATERP (GETPROP X (QUOTE ORDER))
				       (GETPROP Y (QUOTE ORDER])

(PUTPROPS εαPZERO MACROε↓ [LAMBDA NIL 0])

(PUTPROPS εαPZEROP MACROε↓ [LAMBDA (X)
			 (EQP X 0])
)
(DECLARE: DONTCOPY
  (FILEMAP (NIL (694 5651 (PCOEFADD 706 . 803) (PCPLUS 807 . 936) (PCPLUS1 940 . 1218) (PPLUS 1222 . 
1590) (PTIMES 1594 . 2013) (PTIMES1 2017 . 2246) (PTIMES2 2250 . 2441) (PTIMES3 2445 . 3914) (PSIMP 
3918 . 4062) (PCTIMES 4066 . 4198) (PCTIMES1 4202 . 4361) (PEXPTSQ 4365 . 4727) (PPLUS1 4731 . 5156) (
BENCH 5160 . 5224) (ODDP 5228 . 5285) (SETUP 5289 . 5648)))))
STOP

∂06-Jul-82  1740	ARPAVAX.fateman at Berkeley   
Date: 6 Jul 1982 17:35:21-PDT
From: ARPAVAX.fateman at Berkeley
To: rpg@su-ai
Cc: ARPAVAX.jkf@Berkeley

jkf reminded me to do (sstatus translink on)  to get faster times,
so with that, on a 780:
deriv = 25,2, 16.9 of which is GC
dderiv = 27.4, 17.6 of which is GC

fdderiv requires subrcall which we don't have exactly.  I haven't
looked to see how funcall can work instead.

∂06-Jul-82  1802	Kim.fateman at Berkeley 	deriv
Date: 6 Jul 1982 17:57:22-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: deriv
Cc: Kim.jkf@Berkeley

for a (load average 4) vax 750, no floating point accelerator, Franz opus 38.20
36.06 of which 22.2 is in GC.

I think a lighter load on the 750 would help somewhat.

∂06-Jul-82  2047	Kim.fateman at Berkeley 	fft benchmark? 
Date: 6 Jul 1982 20:42:49-PDT
From: Kim.fateman at Berkeley
To: rpg@su-ai
Subject: fft benchmark?
Cc: Kim.jkf@Berkeley, Kim.soiffer@Berkeley

I don't know if this fft benchmark is what you had in mind,
but it was sent to us by Mike Deering, (Fairchild), and is
a 1024 point complex FFT.  In the original form, it ran
at about 32 seconds, and we beat it down to about 25, I think,
with declarations and whatnot.  Since it was a particularly
ugly piece of lisp, we re-wrote it in C, in a functionally
identical manner  (that is, unless you maybe interrupted in the
middle there was no way to tell the difference).  The time
in C is 0.38 seconds, which was about what the KL-10 maclisp
system took.  The C code apparently could be bummed some, but
it was not.  It is a LOT easier to read than the lisp.
...... lisp code follows...
;From Deering@SRI-KL  Sun Apr  4 00:33:45 1982
;Date:  3 Apr 1982 0056-PST
;From: Deering at SRI-KL
;Subject: FFTF.l
;To: kim.fateman at UCB-C70
;
     ;It seems that the timings for franz were under VMS, under our 4.n BSD
;we got the compiled FFT to run about twice as fast, ~32 seconds.  The FFT
;program follows:

(sstatus uctolc t)

;; In-place radix-2 FFT over two parallel flonum arrays (real part in
;; areal, imaginary part in aimag), using old-style Franz/MacLisp
;; (do var init step test ...) loops and arraycall/store array access.
;; Arrays are indexed from 1; n is taken as (array size - 1) and must
;; be a power of two (checked below).  First permutes elements into
;; bit-reversed order, then runs m = log2(n) butterfly stages.
(defun fft					        ;fast fourier transform
  (areal aimag)                                         ;areal = real part
(prog							;aimag = imaginary part
  (ar ai pi i j k m n le le1 ip nv2 ur ui wr wi tr ti)
  ;    (setq ar (get areal 'array))			;initialize
  ;    (setq ai (get aimag 'array))
  (setq ar areal)
  (setq ai aimag)
  (setq pi 3.141592653589793)
  (setq n (cadr (arraydims ar)))
  (setq n (1- n))
  (setq nv2 (quotient n 2))
  (setq m 0)						;compute m = log(n)
  (setq i 1)
  l1 (cond
      ((< i n)(setq m (1+ m))(setq i (+ i i))(go l1)))
  (cond ((not (equal n (expt 2 m)))
	 (princ "error ... array size not a power of two.")
	 (read)
	 (return (terpri))))
  (setq j 1)						;interchange elements
  (setq i 1)						;in bit-reversed order
  l3 (cond ((< i j)
	    (setq tr (arraycall flonum ar j))
	    (setq ti (arraycall flonum ai j))
	    (store (arraycall flonum ar j) (arraycall flonum ar i))
	    (store (arraycall flonum ai j) (arraycall flonum ai i))
	    (store (arraycall flonum ar i) tr)	
	    (store (arraycall flonum ai i) ti)))
  (setq k nv2)
  l6 (cond ((< k j) (setq j (- j k))(setq k (quotient k 2))(go l6)))
  (setq j (+ j k))
  (setq i (1+ i))
  (cond ((< i n)(go l3)))
  (do l 1 (1+ l) (> l m)				;loop thru stages
      (setq le (expt 2 l))
      (setq le1 (quotient le 2))
      (setq ur 1.0)
      (setq ui 0.0)
      (setq wr (cos (quotient pi (float le1))))
      (setq wi (sin (quotient pi (float le1))))
      (do j 1 (1+ j) (> j le1)			;loop thru butterflies
	  (do i j (+ i le) (> i n)			;do a butterfly
	      (setq ip (+ i le1))
	      (setq tr (difference (times (arraycall flonum ar ip) ur)
				   (times (arraycall flonum ai ip) ui)))
	      (setq ti (plus (times (arraycall flonum ar ip) ui)
			     (times (arraycall flonum ai ip) ur)))
	      (store (arraycall flonum ar ip)
		     (difference (arraycall flonum ar i) tr))
	      (store (arraycall flonum ai ip)
		     (difference (arraycall flonum ai i) ti))
	      (store (arraycall flonum ar i)
		     (plus (arraycall flonum ar i) tr))
	      (store (arraycall flonum ai i)
		     (plus (arraycall flonum ai i) ti)))
	  (setq tr (difference (times ur wr) (times ui wi)))
	  (setq ti (plus (times ur wi) (times ui wr)))
	  (setq ur tr)
	  (setq ui ti)))
  (return t)))



(setq re (array re flonum 1025.))

(setq im (array im flonum 1025.))



;; CPU time in seconds: car of (ptime) divided by 60.0 — the clock
;; presumably ticks at 60 Hz (TODO confirm for this Franz build).
(defun runtime ()
  (quotient (car (ptime)) 60.0))



;; Time n iterations of (fft re im) on the global arrays re/im and
;; print the total and per-iteration CPU time (via runtime, above the
;; call sites in this listing).  Output only; returns nothing useful.
(defun try (n)
  (prog (t0)
    (setq t0 (runtime))
    (do ((ntimes 0 (1+ ntimes)))
	((= ntimes n))
      (fft re im))
    (setq t0 (difference (runtime) t0))
    (terpri)(terpri)
    (princ "total time = ")(princ t0)
    (princ "   for ")(princ n)(princ " iterations.")
    (terpri)
    (princ "average time = ")(princ (quotient t0 n))(princ " per iteration.")
    (terpri)(terpri)))

(print 'done)

........ interface to C code follows

(cfasl 'fftc.o '←fft 'fftc 'subroutine)

;; fast fourier transform
;; Lisp-side stub replacing the Lisp fft: forwards both arrays and the
;; array size (cadr of arraydims) to the C routine fftc loaded above
;; with cfasl.
(defun fft (re im)
(fftc re im (cadr(arraydims re)))) ;; realpart, imagpart, arraysize

(setq re (array re flonum-block 1025.))
(setq im (array im flonum-block 1025.))

;; the rest of the file is for timing.
;; same as above
...... the C code for FFT follows

#include <stdio.h>

#define PI	3.141592653589793

/*
 * In-place radix-2 FFT, a transliteration of the Lisp version in this
 * same message: ar/ai hold the real and imaginary parts, indexed from
 * 1 (element 0 unused); nn points to the array size, so n = *nn - 1
 * is the transform length and must be a power of two.  Bit-reversal
 * permutation first, then m = log2(n) butterfly stages.
 * Returns 0 always; prints a diagnostic if n is not a power of two.
 * NOTE(review): locals `r` (and the pre-loop j=1/i=1 assignments) are
 * dead; kept to match the archived listing.
 */
fft (ar, ai, nn)
double ar[], ai[];
int *nn;
{
int n, m, nv2, le, r, k;
register int i,j, ip, l , le1; 
double tr, ti, ur, ui, wi, wr, angle, cos(), sin();
 
n = *nn - 1 ;

/*compute m = log(n)*/
m = 0;
for (i=1; i<n; i+=i)
	m++;

if (n != i) /* i = 2↑m */
   {printf("error...array size not a power of 2.\n"); return(0);}

nv2 = n/2;
j = 1;    /*interchange elements */
i = 1;    /*in bit-reversed order */

for (i=1; i<n; i++) {
	if (i < j) { 
		tr = ar[j]; ar[j] = ar[i]; ar[i] = tr;
		ti = ai[j]; ai[j] = ai[i]; ai[i] = ti;
		}

	for (k=nv2; k<j; k /= 2)
		j -= k;
	j += k;
	}

le = 1;
for (l = 1; l <= m; l++) {
	le += le; le1 = le/2;
	ur = 1.0; ui = 0.0;

	angle = PI/le1;
	wr = cos(angle); wi = sin(angle);
	for (j = 1; j <= le1; j++) { 
		for (i=j; i <= n; i += le) {
			ip = i + le1;
			tr = ar[ip]*ur - ai[ip]*ui;
			ti = ar[ip]*ui + ai[ip]*ur;
			ar[ip] = ar[i] - tr;
			ai[ip] = ai[i] - ti;
			ar[i]  = ar[i] + tr;
			ai[i]  = ai[i] + ti;
			}
		tr = ur*wr - ui*wi;
		ti = ur*wi + ui*wr;
		ur = tr;
		ui = ti;
		}
	}

return(0);
}


∂06-Jul-82  1739	Mabry Tyson <Tyson at SRI-AI> 	Re: Progress  
Date:  6 Jul 1982 1701-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Re: Progress 
To: RPG at SU-AI

I'm a little confused about the message you just sent out about the status
of timings.

First off, all the examples I have sent in (this year) have been from
the same lisp, UT's version of UCI-Lisp.  It should be properly called
UCI-Lisp, with perhaps a footnote saying there are some slight differences
between this version and others but the timings should be almost identical.

Secondly, I think I have sent you results of all the problems except the
ones sent out today.  I've already archived May's mail but the headers show
I sent messages on TAK, MAS (Masinter's TAK), and FRPOLY on May 6.
On May 10, I sent another note on MAS and also one on the PUZZLE.  On
May 11, I sent a note on the timings of FFT.  If you have lost some of those
messages I can retrieve them and resend them.
-------

1. Re: confusion between the terms UTLISP 5.1 and UCILISP (Texas version). I
now have these marked as synonyms, correct?

2.  Excellent. I looked through my archives and found your timings. Seems
I archived them before recording them. Your line of progress now looks like:

------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------
Tyson	    |  |  |  |  |  |  |  |  |  |  |  |  |  | 
20(UTLISP)  | X| X| X| X| X| X|  |  |  | X|  |  |  | 
------------+--+--+--+--+--+--+--+--+--+--+--+--+--+-------------------------

Notes. `u' means uncached times were taken as well.

1. SCCPP
2. TAK
3. FRPOLY
4. TAKL
5. TAKR
6. PUZZLE
7. DERIV
8. DDERIV
9. FDERIV
10. FFT
∂07-Jul-82  1140	RPG  	FFT 
To:   kim.fateman at UCB-C70, "#TIMING.MSG[TIM,LSP]" at SU-AI  
Yes, the code you sent me yesterday is the FFT benchmark I refer to. I do not
have any timings in Franz from you people directly, but I do have Barrow's
report of Vax machines using Franz, I believe. Do you suggest I trust his
figures?
			-rpg-

∂07-Jul-82  1319	Kim.fateman at Berkeley 	Re:  FFT  
Date: 7 Jul 1982 13:14:12-PDT
From: Kim.fateman at Berkeley
To: RPG@SU-AI
Subject: Re:  FFT
Cc: Kim.fateman@Berkeley

I am not inclined to tune the FFT for Franz, simply because it
is not really what the Franz compiler ("Liszt") was designed to do
a good job on.  The philosophical reason is that Franz was set up
to have convenient links to
C and Fortran, and FFT or FFT-like calculations are, in my view,
inappropriate benchmarks for a lisp compiler specifically designed
to ignore numerical optimization.  How well does interlisp do on
bignumber arithmetic?  How well does maclisp load Fortran?
Should we test vectors and/or hunks?

This is not to say Franz's compiler could not be hacked to do fast
arithmetic.  We offered to take money from Fairchild and other
places to support a Franz numeric-compiler project.  They did not respond.

Thus, I would trust Barrow's times, pretty much.  As I mentioned, I
think we got the times down slightly (maybe 30% better), but that's
not significant.

Fine. I understand the design philosophy that you outlined, and
it will appear in my final report concerning Franz where the numeric
benchmarks are discussed. I'm not sure I agree with that philosophy since
the desirability of a uniform language for all of one's programming
needs strikes me as primary.

In any event, I will also report the C times and discuss the linkage
mechanism.
			-rpg-
∂07-Jul-82  1811	Kim.fateman at Berkeley 	Re:  FFT  
Date: 7 Jul 1982 18:05:44-PDT
From: Kim.fateman at Berkeley
To: RPG@SU-AI
Subject: Re:  FFT

I agree that it would be nice to have all things in one language for
uniformity, but only if it could be done well.

∂13-Jul-82  2329	RPG  	Errors   
To:   lisptranslators at SU-AI   
There were several errors in the benchmarks DDERIV and FDDERIV,
which Mabry Tyson pointed out. Please amend DDERIV to contain the
following in place of what I sent:

(DEFUN (TIMES DERIV) (A)
	(LIST 'TIMES (LIST 'TIMES A)
		(CONS 'PLUS (MAPCAR 'DER1 A))))

and FDDERIV to contain:

(DEFUN (TIMES DERIV DERIV) (A)
	(LIST 'TIMES (LIST 'TIMES A)
		(CONS 'PLUS (MAPCAR 'DER1 A))))

			-rpg-

∂13-Jul-82  2348	Mabry Tyson <Tyson at SRI-AI> 	Re: Errors    
Date: 13 Jul 1982 2346-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Re: Errors   
To: RPG at SU-AI
cc: lisptranslators at SU-AI
In-Reply-To: Your message of 13-Jul-82 2329-PDT

The original DDERIV and FDDERIV are in error but I was a little hasty
in my correction.  The proper corrections are:


(DEFUN (TIMES DERIV) (A)
	(LIST 'TIMES (CONS 'TIMES A)
		(CONS 'PLUS (MAPCAR 'DER1 A))))


(DEFUN (TIMES DERIV DERIV) (A)
	(LIST 'TIMES (CONS 'TIMES A)
		(CONS 'PLUS (MAPCAR 'DER1 A))))



	Mabry
-------

∂13-Jul-82  1817	Mabry Tyson <Tyson at SRI-AI> 	Re: Symbolic Derivative (2)  
Date: 13 Jul 1982 1815-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: Re: Symbolic Derivative (2) 
To: RPG at SU-AI
In-Reply-To: Your message of 6-Jul-82 1613-PDT

Did you know that the original DERIV and the second version do not
generate the same answers?

To correct it, use

(DEFUN (TIMES DERIV) (A)
	(LIST 'TIMES (LIST 'TIMES A)
		(CONS 'PLUS (MAPCAR 'DER1 A))))

-------

∂16-Jul-82  0012	Mabry Tyson <Tyson at SRI-AI> 	DERIV, DDERIV, FDDERIV results    
Date: 16 Jul 1982 0003-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: DERIV, DDERIV, FDDERIV results
To: rpg at SU-AI

Results for DERIV, DDERIV, FDDERIV for UCILISP (UT's version).

All timings are on SRI-AI's 2060 with load average around 0.2.

Notes on coding of the programs:
UCILISP open codes MAPCAR if the function is specified by a
LAMBDA expression but does not if the function is a defined function.
Therefore, (MAPCAR 'FOO BAR) was translated into (MAPCAR (FUNCTION
(LAMBDA (X) (FOO X))) BAR) to keep in the spirit of the open coded
version sent out.

DDERIV and FDDERIV involve calling a function which is the value of
a local variable.  In UCILISP this may be done by simply calling
(FOO ...) where FOO is the local variable rather than fooling with
FUNCALL or SUBRCALL.  This format handles both cases.  With (NOUUO
NIL), these function calls do not become direct jumps (because the
value may change next time).

There was a slight problem in having the compiler produce SUBR code
to be stored under a different property (DERIV).  It could be done
but not conveniently.  So I just compiled the code as SUBR's and
editted [sic: edited] the LAP code to make it load onto the DERIV property.

DDERIV and FDDERIV were the version that had the (CONS 'TIMES A) in
the definition for TIMES.

The loop was a PROG which had 5 calls to DERIV and looped 1000 times.
The function that did this was compiled to minimize the overhead.
In order to compute the overhead, I had a similar loop that called
a dummy function that just returned its arguments.  Its cost was
about 0.21 seconds for the (NOUUO T) case and about 0.04 seconds
for the (NOUUO NIL) case.  I did NOT subtract these out in the following
results (but I feel they should be).

Each test run produced about 265000 conses and I had about 150000 free words.
I did a GC before each run to keep things as constant as possible.

Function	(NOUUO T)		(NOUUO NIL)

DERIV		14.879-0.875 (.213)	4.451-0.868 (.043)
DDERIV		16.250-0.856 (.198)	4.983-0.855 (.044)
FDDERIV		16.073-0.873 (.211)	4.857-0.871 (.028)
DDERIV*		18.171-1.742 (.212)	6.091-1.717 (.043)

The format of the times are
		total-gc (dummy)
where total is the total CPU time (including GC), GC is the amount used for
garbage collection and dummy is the amount of time used by the dummy loop.  I
believe the clock ticks about every 0.015 seconds which explains the
difference between the .028 and .044 dummy times (one less tick).

Explanation of results:
I believe the second and third to be slower because the property list of
the variable (whose value was the property name) had to be searched twice,
first for a function property and then for the value of the variable.
Then, for the DDERIV case, another property list had to be searched to
find the function definition.

The DDERIV* case is one in which a FUNCALL is used.  In UCILISP this
results in an extra CONS and a call to APPLY* which then does everything
done above.  As you can see, it is slower.
-------

∂19-Jul-82  0615	ACORREIRA at BBNA 	address change  
Date: 19 Jul 1982 0914-EDT
From: ACORREIRA at BBNA
Subject: address change
To: lisptranslators at SU-AI

Please change my address to acorreira@bbna.  Thanks.

Alfred Correira (correira@utexas-11)

-------

∂09-Jul-82  2103	Martin.Griss <Griss at UTAH-20> 	Latest TAK #'s   
Date:  9 Jul 1982 2159-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: Latest TAK #'s
To: rpg at SU-AI
cc: griss at UTAH-20

Same old TAK as before, latest PSL on APollo's
	Utah 8Mz, 512K Apollo :2644ms (vs 1292ms on VAX, INUMs)
	10Mz, 1Mb Apollo with Cache :1679ms

Just done this evening, CRAY-1 Syslisp, not fully tested,
	TAK <48ms. (We have not optimized CMACRO's yet, but
	            have run TAK, factorial, etc).
-------

∂16-Jul-82  0012	Mabry Tyson <Tyson at SRI-AI> 	DERIV, DDERIV, FDDERIV results    
Date: 16 Jul 1982 0003-PDT
From: Mabry Tyson <Tyson at SRI-AI>
Subject: DERIV, DDERIV, FDDERIV results
To: rpg at SU-AI

Results for DERIV, DDERIV, FDDERIV for UCILISP (UT's version).

All timings are on SRI-AI's 2060 with load average around 0.2.

Notes on coding of the programs:
UCILISP open codes MAPCAR if the function is specified by a
LAMBDA expression but does not if the function is a defined function.
Therefore, (MAPCAR 'FOO BAR) was translated into (MAPCAR (FUNCTION
(LAMBDA (X) (FOO X))) BAR) to keep in the spirit of the open coded
version sent out.

DDERIV and FDDERIV involve calling a function which is the value of
a local variable.  In UCILISP this may be done by simply calling
(FOO ...) where FOO is the local variable rather than fooling with
FUNCALL or SUBRCALL.  This format handles both cases.  With (NOUUO
NIL), these function calls do not become direct jumps (because the
value may change next time).

There was a slight problem in having the compiler produce SUBR code
to be stored under a different property (DERIV).  It could be done
but not conveniently.  So I just compiled the code as SUBR's and
editted [sic: edited] the LAP code to make it load onto the DERIV property.

DDERIV and FDDERIV were the version that had the (CONS 'TIMES A) in
the definition for TIMES.

The loop was a PROG which had 5 calls to DERIV and looped 1000 times.
The function that did this was compiled to minimize the overhead.
In order to compute the overhead, I had a similar loop that called
a dummy function that just returned its arguments.  Its cost was
about 0.21 seconds for the (NOUUO T) case and about 0.04 seconds
for the (NOUUO NIL) case.  I did NOT subtract these out in the following
results (but I feel they should be).

Each test run produced about 265000 conses and I had about 150000 free words.
I did a GC before each run to keep things as constant as possible.

Function	(NOUUO T)		(NOUUO NIL)

DERIV		14.879-0.875 (.213)	4.451-0.868 (.043)
DDERIV		16.250-0.856 (.198)	4.983-0.855 (.044)
FDDERIV		16.073-0.873 (.211)	4.857-0.871 (.028)
DDERIV*		18.171-1.742 (.212)	6.091-1.717 (.043)

The format of the times are
		total-gc (dummy)
where total is the total CPU time (including GC), GC is the amount used for
garbage collection and dummy is the amount of time used by the dummy loop.  I
believe the clock ticks about every 0.015 seconds which explains the
difference between the .028 and .044 dummy times (one less tick).

Explanation of results:
I believe the second and third to be slower because the property list of
the variable (whose value was the property name) had to be searched twice,
first for a function property and then for the value of the variable.
Then, for the DDERIV case, another property list had to be searched to
find the function definition.

The DDERIV* case is one in which a FUNCALL is used.  In UCILISP this
results in an extra CONS and a call to APPLY* which then does everything
done above.  As you can see, it is slower.
-------

∂18-Jul-82  0719	Martin.Griss <Griss at UTAH-20> 	[Martin.Griss <Griss at UTAH-20>: MAS times]   
Date: 18 Jul 1982 0815-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: [Martin.Griss <Griss at UTAH-20>: MAS times]
To: rpg at SU-AI
cc: griss at UTAH-20

What were the "official" MAS times elsewhere?
                ---------------

Date: 23 May 1982 0004-MDT
From: Martin.Griss <Griss at UTAH-20>
Subject: MAS times
To: rpg at SU-AI
cc: griss

ON version 3 PSL:
DEC-20, 2.568 secs
VAX/750 11.59 secs

Will look and see if I missed anything. What do you expect?
-------
-------

∂23-Jul-82  1519	Howard I. Cannon <HIC at SCRC-TENEX at MIT-MC> 	Timings for Symbolics LM-2 
Date: Friday, 23 July 1982, 18:16-EDT
From: Howard I. Cannon <HIC at SCRC-TENEX at MIT-MC>
Subject: Timings for Symbolics LM-2
To: rpg at SU-AI
Cc: hic at SCRC-TENEX at MIT-MC, dla at SCRC-TENEX at MIT-MC


These are the Symbolics LM-2 timings for the benchmarks.  They were done
by Dave Andre, whom I thank.  I hope they are useful.  We won't have any
3600 timings available until September, I think.  Let me know if these
results are satisfactory.  Also, I'd like to see a table of current
results so that I can figure out where things stand (numbers, as opposed
to completions).

--Howard


;; Macro: evaluate FORM with interrupts off, print "<NAME>: <n> seconds"
;; (microsecond clock divided down to seconds), and return FORM's value.
;; TIME:MICROSECOND-TIME and WITHOUT-INTERRUPTS are Lisp Machine primitives.
(DEFMACRO TIMING (NAME FORM)
  `(WITHOUT-INTERRUPTS
     (LET ((TIME (TIME:MICROSECOND-TIME)))
       (PROG1 ,FORM
	      (FORMAT T ,(STRING-APPEND "~&" NAME ": ~S seconds")
		      (// (- (TIME:MICROSECOND-TIME) TIME) 1000000.0))))))

;; 1. SCCPP

;; Time one run of the SCCPP benchmark under TIMING; LENGTH forces the
;; result.  A and B are the benchmark's free-variable test lists.
(DEFUN TEST-SCCPP ()
  (LENGTH (TIMING "SCCPP"
		  (PAIRS A B () 'EQUAL () () ()))))

;; Compiled:  9.3 seconds.
;; Interpreted:  116.3 seconds.

;; This test seems to suffer from an incredibly cretinously written NCONC.
;; After adding NCONC optimizer to compiler (which will exist in 211):
;; Compiled:  7.9 seconds.

;; 2. TAK

;; Time one call of TAK on the standard benchmark arguments (18 12 6).
(DEFUN TEST-TAK ()
  (TIMING "TAK" (TAK 18. 12. 6.)))

;; Compiled:  2.905 seconds
;; Interpreted:  291 seconds


;; 3. FRPOLY

;; Time PEXPTSQ (polynomial exponentiation by repeated squaring) raising
;; each of the three FRPOLY test polynomials R, R2, R3 to the Nth power,
;; then print the three times in seconds on one line.
(DEFUN TEST-FRPOLY (N)
  (LET (START RES1 RES2 RES3)
    (WITHOUT-INTERRUPTS
      (SETQ START (TIME:MICROSECOND-TIME))
      (PEXPTSQ R N)
      (SETQ RES1 (TIME:MICROSECOND-TIME))
      (PEXPTSQ R2 N)
      (SETQ RES2 (TIME:MICROSECOND-TIME))
      (PEXPTSQ R3 N)
      (SETQ RES3 (TIME:MICROSECOND-TIME)))
    (FORMAT T "~%Power= ~D. ~20T~S ~32T~S ~44T~S" N
	    (// (- RES1 START) 1000000.0)
	    (// (- RES2 RES1)  1000000.0)
	    (// (- RES3 RES2)  1000000.0))))

;; Compiled results
;; Power= 2.           0.016191    0.020865    0.021139
;; Power= 2.           0.01614     0.020716    0.020233
;; Power= 5.           0.175673    0.251539    0.236832
;; Power= 5.           0.20282     0.251422    0.236561
;; Power= 10.          2.202732    3.7338      3.143525
;; Power= 10.          2.293053    3.520599    2.725204
;; Power= 15.          18.589147   32.037923   22.587246
;; Power= 15.          18.547342   30.895854   22.432256


;; 4. TAKL

;; Time one call of TAKF on (18 12 6).
;; NOTE(review): the surrounding text labels this "4. TAKL" but the code
;; times TAKF; an identical definition appears again under "4A. TAKF".
(DEFUN TEST-TAKF ()
  (TIMING "TAKF" (TAKF 18. 12. 6.)))

;; Compiled:  4.446 seconds.
;; Interpreted:  long.


;; 4A. TAKF (????)
;; Where did this come from?

;; Duplicate of the TEST-TAKF definition above, preserved as in the
;; original message (the author himself asks "Where did this come from?").
(DEFUN TEST-TAKF ()
  (TIMING "TAKF" (TAKF 18. 12. 6.)))

;; Compiled:  4.446 seconds.
;; Interpreted:  long.


;; 5. TAKR
;; We don't seem to have this?


;; 6. PUZZLE

;; Time one run of the Baskett PUZZLE benchmark (entry point START).
(DEFUN TIME-PUZZLE ()
  (TIMING "PUZZLE" (START)))

;; Compiled:  59.0 Seconds

;; Above could be faster if the crufty array references weren't function calls.


;; 7. DERIV

;; Time the DERIV benchmark's driver (RUN).
(DEFUN TEST-DERIV ()
  (TIMING "DERIV" (RUN)))

;; Compiled: 23.9 seconds.


;; 8. DDERIV

;; Time the DDERIV benchmark's driver (RUN).
(DEFUN TEST-DDERIV ()
  (TIMING "DDERIV" (RUN)))

;; Compiled:  25.4 seconds.


;; 9. FDERIV

;; FDDERIV 3 shouldn't be any different on the LM-2 than DDERIV, if I understand
;; it correctly.


;; 10. FFT

;;; Sets up the two arrays
;---
;; Create the 1025-element FLONUM arrays used by FFT and bind the array
;; objects (fetched with FSYMEVAL from the array's function cell) to the
;; variables RE and IM.
(SETQ RE (FSYMEVAL (ARRAY RE FLONUM 1025.)))

(SETQ IM (FSYMEVAL (ARRAY IM FLONUM 1025.)))

;; Time ten repetitions of FFT over the RE/IM arrays set up above.
(DEFUN TEST-FFT ()
  (TIMING "FFT" (LOOP REPEAT 10. DO (FFT 'RE 'IM))))

;; Compiled:  36.8 seconds.
;; Array references compile OK in this test.

∂10-Aug-82  1605	RPG  	MAS/TAKL 
To:   kim.jkf at UCB-C70
CC:   "#TIMING.MSG[TIM,LSP]" at SU-AI
A while back you sent me MAS (TAKL) results, but upon reading them
more carefully I think you have mislabelled TAKR as MAS. TAKR
used TAK0-TAK99. MAS was:

;; (listn n) => the list (n n-1 ... 1); nil when n is 0.
(defun listn (n)
    (cond
      ((= 0 n)
	nil)
      (t (cons n (listn (1- n))))))

;; Takeuchi function on lists ("MAS", the list version of TAK): cdr plays
;; the role of decrement and shorterp the role of the < comparison.
(defun mas (x y z)
    (cond
      ((not (shorterp y x))
	z)
      (t (mas (mas (cdr x)
		   y z)
	      (mas (cdr y)
		   z x)
	      (mas (cdr z)
		   x y)))))

;; True iff list x is strictly shorter than list y.
(defun shorterp (x y)
       (and y (or (null x)
		  (shorterp (cdr x)
			    (cdr y)))))

;benchmark is called

;(mas (listn 18) (listn 12) (listn 6))

Could you please check this against your recollection? Thanks.
			-rpg-

∂04-Aug-82  1052	HIC at SCRC-TENEX 	[DLA: forwarded]
Date: Wednesday, 4 August 1982  12:26-EDT
From: HIC at SCRC-TENEX
To: rpg at sail
Subject: [DLA: forwarded]

Date: Saturday, 31 July 1982, 09:46-EDT
From: David L. Andre <DLA>
To:   HIC
cc:   DLA

While I was editing UCADR, I decided to microcode a two-argument NCONC.
With this microcoded NCONC, the SCCPP benchmark radically improved:

;; Old results:
;; Compiled:  9.3 seconds.
;; Interpreted:  116.3 seconds.

;; This test seems to suffer from an incredibly cretinously written NCONC.
;; After adding NCONC optimizer to compiler (which will exist in 211):
;; Compiled:  7.9 seconds.

;; With microcoded NCONC (System 211, UCADR 920)
;; Compiled: 6.0 seconds.

You may wish to forward this to the appropriate people.

∂27-Aug-82  1034	Masinter at PARC-MAXC 	old benchmarks out of the past  
Date: 27-Aug-82 10:29:54 PDT (Friday)
From: Masinter at PARC-MAXC
Subject: old benchmarks out of the past
To: RPG@SU-AI




------------------------------
Mail-from: Arpanet host USC-ISIB rcvd at 7-MAY-81 0808-PDT
Date:  7 May 1981 0809-PDT
From: LYNCH at USC-ISIB
Subject: VAX 750/ VAX 780 benchmark data
To:   Balzer, Cohen, Ellis, DDyer, Koomen, RBates,
To:   Masinter at PARC

Mail-from: ARPANET host SRI-AI rcvd at 7-May-81 0201-PDT
Date:  6 May 1981 2331-PDT
From: KASHTAN
Subject: VAX-11/750 <--> VAX-11/780 benchmarks
To: quam, witkin, hanson, jirak, wilcox, meyers, larson, kennard, sad,
    heathman at SRI-AI, ryland at SRI-AI, burback at SRI-AI, mcghie,
    sword
Remailed-date:  7 May 1981 0139-PDT
Remailed-from: SAD at SRI-AI (Stephen Dougherty)
Remailed-to: Lynch at SRI-AI, Mcgreal at SRI-AI

Here are the complete results of the 11/750 - 11/780 benchmarks.  Looks
like the 11/750 gets to memory faster (and is optimized w.r.t. getting
to memory faster) than the 11/780.  It loses VERY badly when it comes to
actually executing instructions, as the execution unit is very much slower
in the 750 than the 780.  This is particularly borne out by the execution
benchmarks for the convolution program in various languages.  The languages
vary from BLISS (which keeps the whole world in registers) to LISP (which
keeps the whole world in memory).  Even though the 750 gets to memory faster,
it doesn't do you much good when it takes so long to process what you got
from memory (even a simple move).
The 750 does a good job of operand processing (especially given its relative
CPU speed) but this doesn't seem to help too much in actual program execution,
as on the 750 the execution time seems to be dominated by the instruction
execution time rather than on the operand fetch time (as is the case on
the 780).
A note on Richard Fateman's 750 benchmarks.  Seems that all they did was
run a Liszt (Franz Lisp compiler) compile on one of Bell Labs UNIX systems.
A compiled Franz Lisp program (as Liszt is) tends to be very heavy on CALLS
and on moving things around in memory (i.e. to and from the stack).  No
intermediate results are kept in registers at all.  What this does is skew
the results somewhat towards a faster looking 750 (since the 750 will benefit
from any benchmarks that are heavily involved in memory referencing).  What
he reported was that the 750 was indeed about 60% of the 780 in this case.
PLEASE NOTE that large IU and VLSI programs, while we might consider them
memory intensive, are really virtual memory intensive (i.e. have very large
working sets).  This is not the same as the above benchmark.  Most IU and
VLSI programs when compiled with good compilers will tend to do a small amount
of computation (even just an add or multiply) with each datum fetched from
memory.  You can expect the performance of the 750 relative to the 780 to
drop quite a bit from the above mentioned 60%.  It should become very much
like the following convolution benchmarks (a very good example of a virtual
memory intensive program that does a small amount of computation with each
datum fetched).  An interesting side note:  CARs and CDRs in compiled lisp
tend to come out as   "movl  x(r),dst"  (which executes at about 60% of 780
speed).
My feeling from playing with the two systems is that the 750 is best used
as an entry level system for those sites which need to acquire the smallest
possible VAX configuration (i.e. the lowest possible price).  An entry level
750 goes for about $90K while an entry level 780 system with approximately
the same configuration would go for about $140K.  Clearly there is a big
difference here (almost all of it in the price of the CPU).  As the systems
get larger the price advantage goes away (as the price will not be dominated
by the CPU price, which is the case in the smaller systems, but by memory /
peripheral prices).  Here you will save about $50K on a $250K system and get
less than 1/2 the machine.
I am somewhat confused by the divide instruction timings.  There are a couple
of possibilities here - 1) a stupidity in the 780 was fixed in the 750
			2) I muffed the 780 test (don't think so, as I
					triple checked it)
			3) I muffed the 750 test.
I find it incredible that MULL is 4x as fast on the 780 while DIVL is a bit
slower on the 780.  I did not do any floating point tests, as there is no
floating point accelerator on the 750.
David

-------------------------------------------------------------------------------

VAX-11/750 vs VAX-11/780
------------------------

Simple 2D convolution program:

		11/750		11/750 (% of 11/780)		11/780
		------		--------------------		------

BLISS-32	5.45 sec		45%			2.5 sec

VMS PASCAL	12.9 sec		38%			4.9 sec

UNIX C		11.3 sec		44%			5.0 sec

UNIX F77	39.9 sec		29%			11.4 sec

Compiled
Franz Lisp	76.5 sec		53%			41.0 sec


Instruction timings:

movl r,r	1000nSec		40%			400nSec
movl x(PC),r	1760nSec		45%			800nSec
movl r,x(PC)	2300nSec		52%		       1300nSec
movl (r),r	1330nSec		60%		        800nSec

Addressing modes:

r		0nSec			--			0nSec
# (short)	0nSec			--			0nSec
# (long)	700nSec			57%			400nSec
(r)		330nSec		       120%			400nSec
(r)+		330nSec		       120%			400nSec
-(r)		330nSec		       120%			400nSec
@(r)+		900nSec		       111%		       1000nSec
x(r)		500nSec			80%			400nSec
@x(r)		1150nSec		86%		       1000nSec
[r]		1000nSec		60%			600nSec

Instructions:

MOVL		1000nSec		40%			400nSec
ADDL
SUBL
etc

MULL		8000nSec		25%			2000nSec
DIVL		8000nSec	       112%			9000nSec
CALLx/RET	20000nSec+1800nSec/register			15000nSec+
				       100%			2000nSec/Reg
JSB/RSB		6000nSec		50%			3000nSec
SOBGxx		2000nSec		50%			1000nSec
ACBL		5600nSec		71%			4000nSec
MOVC3		350nSec/byte	       107%			375nSec/byte

3 operand	+500nSec		40%			+200nSec
instructions
-------
-------

----------------------------------------------------------------

∂30-Sep-82  1218	James Bennett <csd.Bennett at SU-SCORE> 	new timings   
Date: 30 Sep 1982 1207-PDT
From: James Bennett <csd.Bennett at SU-SCORE>
Subject: new timings
To: rpg at SU-AI
Stanford-Phone: (415) 497-2225
Home-Phone: (415) 322-2233

Dick,
	Here they are:
6←(FLENGTH (TIME (PAIRS A B NIL 'EQUAL) 1 0]

collecting lists
3234, 10402 free cells

collecting lists
2842, 10010 free cells

collecting lists
3984, 10128 free cells

collecting lists
4052, 10196 free cells

collecting lists
16161, 16161 free cells
51493 conses
4.845 seconds
22.672 seconds, real time
2592
7←USE 4 FOR 1 IN 6

collecting lists
25873, 25873 free cells

collecting lists
9746, 10258 free cells

collecting lists
15907, 15907 free cells

collecting lists
6358, 10454 free cells

collecting lists
3642, 10298 free cells

collecting lists
4118, 10262 free cells

collecting lists
5956, 10052 free cells

collecting lists
35319, 35319 free cells

collecting lists
11874, 11874 free cells

collecting lists
38897, 38897 free cells

collecting lists
12308, 12308 free cells
205972/4 = 51493 conses
18.741/4 = 4.68525 seconds
200.712 seconds, real time
2592
10←DRIBBLE]
-------

∂03-Oct-82  2050	James Bennett <csd.Bennett at SU-SCORE> 	initial timings    
Date:  3 Oct 1982 2046-PDT
From: James Bennett <csd.Bennett at SU-SCORE>
Subject: initial timings
To: rpg at SU-AI
Stanford-Phone: (415) 497-2225
Home-Phone: (415) 322-2233

Dick,
	I got it to work. You are right about Interlisp array indices
starting at 1 rather than 0. I thought that the ADD1's would unduly
increase the running times but I think that this effect is in the noise
after looking at the following results. First I block compiled the functions,
timed start, and then did the same with the uncompiled versions. As
you can see below, compiling had little effect. I would say that array
access sucks in Interlisp. I suggest that you run the breakdown package
on this function just to see where it is spending its time, jim

NIL
←(TIME (START) 1 0]   ; bcompl'd version

success in 2005 trials
0 conses
1481.138 seconds
2782.667 seconds, real time
NIL
←LOAD(PUZZLE]
FILE CREATED  3-Oct-82 13:00:20
PUZZLECOMS
(START redefined)
(DEFINE-ARRAY redefined)

collecting arrays
7820, 10380 free cells
<CSD.BENNETT>PUZZLE..10
←REDO TIME  ; expr version

success in 2005 trials
0 conses
1481.692 seconds
2397.734 seconds, real time
NIL
←PL START
CODE : #176777
EXPR : (LAMBDA NIL (& 0) --)
←UNSAVEDEF(START CODE]
CODE
←REDO TIME

success in 2005 trials
0 conses
1479.684 seconds
2426.698 seconds, real time
NIL
←DRIBBLE]
-------

∂06-Oct-82  1907	James Bennett <csd.Bennett at SU-SCORE> 	bug in puzzle 
Date:  6 Oct 1982 1858-PDT
From: James Bennett <csd.Bennett at SU-SCORE>
Subject: bug in puzzle
To: rpg at SU-AI
Stanford-Phone: (415) 497-2225
Home-Phone: (415) 322-2233

Dick,
	Brought the new file over, eliminated an extra parenthesis in the
DEFINEQ (before (REMOVE! (LAMBDA --))), and gave it a whirl:

NIL
←(TIME (START) 1 0]
ADDI {in FIT} -> ADD1 ?  ...yes

NON-NUMERIC ARG
NIL
in IEQP

(broken)
:EDITF↔\\EDITF BTV
IEQP
COND

   I 0
*PROG*LAM
PROG

   K NIL
(LAMBDA (K) (PROG & & LOOP & & & &) 0)

   J 78
TRIAL
COND

   KOUNT 0
   N 78
   M 74
(LAMBDA (M N KOUNT) (COND & &) (COND & &) (TERPRI))
START
EVAL
TIME
**TOP**

:
(broken)
:EDITF(FIT]
edit
*PP
[LAMBDA (I J)
  ([LAMBDA (END)
      (PROG (K)
            (SETQ K 0)
        LOOP(COND
              ((IGREATERP K END)
                (RETURN T)))
            [COND
              ((*ELT PX I (ADD1 K))
                (COND
                  ((ELT PUZZLE (IPLUS J K))
                    (RETURN NIL]
            (SETQ K (ADD1 K))
            (GO LOOP]
    (IDIFFERENCE (ELT PIECEMAX (ADD1 I))
                 1]
*OKO
OKO=OK O ?  Yes
FIT
u.b.a.
   (O broken)
:EDITF(TRAIL]
=TRIAL
edit
*PP
[LAMBDA (J)
  ((LAMBDA (K)
      (PROG (I)
            (SETQ I 0)
        LOOP(COND
              ((IGREATERP I TYPEMAX)
                (SETQ KOUNT (ADD1 KOUNT))
                (RETURN NIL)))
            [COND
              ((NOT (IEQP (ELT PIECECOUNT (ELT CLASS I))
                          0))
                (COND
                  ((FIT I J)
                    (SETQ K (PLACE I J))
                    (COND
                      ((OR (TRIAL K)
                           (IEQP K 1))
                        (SETQ KOUNT (ADD1 KOUNT))
                        (RETURN T))
                      (T (REMOVE! I J]
            (SETQ I (ADD1 I))
            (GO LOOP))
      0]
*
*OK
TRIAL
:DRIBBLE]


The new version is PUZZLE.2, jim
-------

∂09-Oct-82  0413	JonL at PARC-MAXC 	Order of magnitude on PUZZLE   
Date: 9 Oct 1982 04:13 PDT
From: JonL at PARC-MAXC
Subject: Order of magnitude on PUZZLE
To: RPG@SU-AI
cc: LispCore↑, Raim.EOS, Pahlavan.EOS

I've got my little "CommonLisp" array package coded up now (it's on
the ADDARITH file), which uses "margin arrays" for multi-dimension
arrays.  Using that, rather than ELT and SETA, and with DisplayDown,
I ran the Baskett benchmark PUZZLE on the Dolphin you used yesterday,
and got a time of 71.4 seconds.  

This compares with the DisplayUp ELT/SETA time you got of 616. seconds.

Didn't you say that the LM-2 time was around 91 seconds?

P.S. -- Dorado time was about 11.4 seconds
P.P.S. -- This "CommonLisp" array package needs a little more debugging,
    but some version of the "fast" AREF might well be useful to others,
    just as the "fast" ELT is used in Interlisp-10.



∂05-Oct-82  1352	Masinter at PARC-MAXC 	a parser benchmark    
Date:  5-Oct-82 13:11:19 PDT (Tuesday)
From: Masinter at PARC-MAXC
Subject: a parser benchmark
To: RPG@SU-AI


------------------------------
Mail-from: Arpanet host CMU-10A rcvd at 5-OCT-82 1150-PDT
Date:  5 October 1982 1448-EDT (Tuesday)
From: James.Morris at CMU-10A
To: masinter at PARC-MAXC, rovner at PARC-MAXC
Subject: LIssp program
Message-Id: <05Oct82 144814 JM90@CMU-10A>

Here is the maclisp program as it runs here.  An earlier version of
it came in second in a programming contest (based on running time, not style!)

(COMMENT --CONTENTS-- ALS CLOSED CREATESET GPAT POS VALUE WORKING ADD
         ALLSETS ASSIGN CCONC CLEARLIST CLEARVSETS CNUM COMPILE CONCAT
         CONCAT1 CONTINUEMATCH CPAT CTERM END EXS FSM INSERTQ
         ITERATIONS MAKEDM MAKENDM MAKEREADY MATCH MERGE MVSET
         PERMUTES PPAT PSS REMQ RUNREADY SHIFTQS SKIPNILS SPAN STAR
         STAR1 STARTMATCH TEST TIEUP UNIQUESET TEST1 GLITCH *RSET NOUUO
         )

;; MacLisp compiler declarations: expand MAP functions inline (MAPEX),
;; fixnum arithmetic switch (FIXSW), don't retain macro definitions.
(DECLARE (MAPEX T)
         (FIXSW T)
         (MACROS NIL))

;; NOTE(review): purpose of this top-level call is unclear from here --
;; presumably it snaps the SORT/CNUM linkage before any timed run.
(SORT NIL 'CNUM)

;; Global state shared by the matcher.  NOTE(review): INTERATIONS looks
;; like a typo for ITERATIONS (both are declared); preserved verbatim.
(DECLARE (SPECIAL STATESETS SS L2 CLEARLIST READYQUEUE
          RECURSIONPOSSIBLE EXS INTERATIONS ARC ITERATIONS OK))

;; Accessor macros (old-style DEFUN ... MACRO definitions).
;; A match value appears to be (remaining-input . binding-alist); a value
;; set is a list of found values whose open end is marked by a NIL cell
;; (see ADD/MVSET below) -- TODO confirm against the rest of the file.

;; (ALS v) -> binding a-list of match value v.
(DEFUN ALS MACRO (X) (LIST 'CDR (CADR X))) 

;; (CLOSED vs) -> true when the set's cons chain has run out.
(DEFUN CLOSED MACRO (X) (LIST 'NULL (CADR X))) 

;; (CREATESET) -> a fresh, still-open value set: (NIL).
(DEFUN CREATESET MACRO (X) '(LIST NIL)) 

;; (GPAT w) -> w, except a one-element list is unwrapped to its element.
(DEFUN GPAT MACRO (X)
  (SUBST (CADR X)
         'W
         '(PROG (Z)
            (SETQ Z W)
            (RETURN (COND ((OR (ATOM W) (CDR W)) W)
                          (T (CAR W))))))) 

;; (POS v) -> remaining input of match value v.
(DEFUN POS MACRO (X) (LIST 'CAR (CADR X))) 

;; (VALUE pos als) -> construct a match value.
(DEFUN VALUE MACRO (X) (LIST 'CONS (CADR X) (CADDR X))) 

;; (WORKING vs) -> true at the open (NIL-marked) cell of a value set.
(DEFUN WORKING MACRO (X) (LIST 'NULL (LIST 'CAR (CADR X)))) 

;; Add value V to value set SINK unless an EQUAL value is already there.
;; On insertion: every suspended consumer (proc . env) stored after the
;; open marker is re-queued via MAKEREADY with V consed in as its first
;; argument, then V is stored in the marker cell and a fresh NIL marker
;; is spliced in after it (RPLACA/RPLACD).
(DEFUN ADD (V SINK)
  (PROG NIL
   L    (COND ((WORKING SINK) (GO M))
              ((EQUAL V (CAR SINK)) (RETURN NIL)))
        (SETQ SINK (CDR SINK))
        (GO L)
   M    (DO PS
          (CDR SINK)
          (CDR PS)
          (NULL PS)
          (MAKEREADY (CONS (CAAR PS) (CONS V (CDAR PS)))))
        (RPLACA SINK V)
        (RPLACD SINK (CONS NIL (CDR SINK))))) 

;; Closure helper for MAKEDM: for every state already on SL, INSERTQ the
;; states reachable through NIL-labeled (epsilon) arcs, so SL grows to
;; contain everything reachable without consuming input.  Returns SL.
(DEFUN ALLSETS (SL)
  (PROG NIL
    (DO Y
     SL (CDR Y)
        (NULL Y)
        (COND ((NOT (ATOM (CAR Y)))
               (DO W
                 (CAR Y)
                 (CDR W)
                 (NULL W)
                 (COND ((NULL (CAAR W)) (INSERTQ (CDAR W) SL)))))))
    (RETURN SL))) 

;; Completion for a (:= var pat) pattern: for match value V, record the
;; binding (AT . matched-segment) -- the SPAN from ST down to V's
;; position -- merged into V's existing bindings, and add it to ANS.
(DEFUN ASSIGN (V ANS ST AT)
  (ADD (VALUE (POS V)
              (MERGE (LIST (CONS AT (SPAN ST (POS V)))) (ALS V)))
       ANS)) 

;; Compile-time scan of a concatenation pattern PAT, working from the
;; right.  Returns (compilable-suffix . uncompilable-rest); when a
;; compilable run ends mid-pattern it is destructively replaced inside
;; PAT (DISPLACE) by the deterministic machine MAKEDM builds for it.
(DEFUN CCONC (PAT)
  (PROG (P2)
    (COND ((NULL PAT) (RETURN (CONS NIL NIL))))
    (SETQ P2 (CCONC (CDR PAT)))
    (RETURN (COND ((CTERM (CAR PAT))
                   (CONS (CONS (CAR PAT) (CAR P2)) (CDR P2)))
                  ((CAR P2)
                   (DISPLACE PAT
                             (CONS (CAR PAT)
                                   (CONS (MAKEDM (CAR P2))
                                         (CDR P2))))
                   (CONS NIL PAT))
                  (T (CONS NIL PAT)))))) 

;; Atoms that have acquired a VSET property (memoized match results)
;; and must be cleaned up between matches.
(SETQ CLEARLIST NIL)

;; Remove the VSET property from every atom on CLEARLIST, then empty it.
(DEFUN CLEARVSETS NIL
  (PROG NIL
    (DO CLEARLIST
     CLEARLIST (CDR CLEARLIST)
        (NULL CLEARLIST)
        (REMPROP (CAR CLEARLIST) 'VSET))
    (SETQ CLEARLIST NIL))) 

(DEFUN CNUM (A B) (LESSP (MAKNUM A) (MAKNUM B))) 

;; If pattern PAT is fully compilable (CPAT), destructively replace it
;; with the deterministic machine MAKEDM builds; otherwise CPAT has
;; already compiled whatever sub-parts it could, in place.
(DEFUN COMPILE (PAT)
  (COND ((CPAT PAT) (DISPLACE PAT (MAKEDM PAT)))
        (T PAT))) 

;; Continue a concatenation: for each way the rest-pattern RPAT matches
;; starting at V's position...
(DEFUN CONCAT (V ANS RPAT)
  (MVSET (STARTMATCH (POS V) RPAT) 'CONCAT1 (LIST ANS (ALS V)))) 

;; ...add the combined result (second part's position, merged bindings)
;; to the answer set ANS.
(DEFUN CONCAT1 (V ANS FALS)
  (ADD (VALUE (POS V) (MERGE FALS (ALS V))) ANS)) 

;; Match compound pattern PAT against input ST, adding results to value
;; set ANS.  Dispatches on the pattern operator:
;;   (! p...)  alternation           (* p...)  zero-or-more repetition
;;   (:= v p)  match p and bind v    (& p...)  permutation, expanded via
;;                                             PERMUTES and GLITCH
;;                                             (GLITCH defined elsewhere)
;;   ($ . m)   compiled machine      otherwise concatenation.
;; The (* ?) case is special-cased: it matches every tail of ST directly.
(DEFUN CONTINUEMATCH (ST PAT ANS)
  (COND ((EQ (CAR PAT) '!)
         (DO P
           (CDR PAT)
           (CDR P)
           (NULL P)
           (MVSET (STARTMATCH ST (GPAT (CAR P))) 'ADD (LIST ANS))))
        ((EQ (CAR PAT) '*)
         (COND ((EQUAL (CDR PAT) '(?))
                (DO ST
                 ST (CDR ST)
                    (NULL ST)
                    (ADD (VALUE ST NIL) ANS))
                (ADD (VALUE NIL NIL) ANS))
               (T (ADD (VALUE ST NIL) ANS)
                  (MVSET (STARTMATCH ST (GPAT (CDR PAT)))
                         'ADD
                         (LIST ANS))
                  (MVSET (CDR ANS)
                         'STAR
                         (LIST ANS (GPAT (CDR PAT)))))))
        ((EQ (CAR PAT) ':=)
         (MVSET (STARTMATCH ST (GPAT (CDDR PAT)))
                'ASSIGN
                (LIST ANS ST (CADR PAT))))
        ((EQ (CAR PAT) '&)
         (CONTINUEMATCH ST
                        (DISPLACE PAT
                                  (GLITCH (PERMUTES (CDR PAT))))
                        ANS))
        ((EQ (CAR PAT) '$) (FSM (CDR PAT) ST ANS))
        ((NULL (CDR PAT))
         (MVSET (STARTMATCH ST (CAR PAT)) 'ADD (LIST ANS)))
        (T (MVSET (STARTMATCH ST (CAR PAT))
                  'CONCAT
                  (LIST ANS (GPAT (CDR PAT))))))) 

;; Is the whole pattern PAT compilable?  Returns T when so.  Otherwise
;; any compilable leading run found by CCONC is compiled and spliced in
;; place (DISPLACE), and NIL is returned.
(DEFUN CPAT (PAT)
  (PROG (P1)
    (SETQ P1 (CCONC PAT))
    (RETURN (COND ((NULL (CDR P1)) T)
                  ((NULL (CAR P1)) NIL)
                  (T (RPLACA P1 (MAKEDM (CAR P1)))
                     (DISPLACE PAT P1)
                     NIL))))) 

;; Is a single pattern term TE compilable?  For atoms with a REWRITE:
;; (grammar) property the answer is computed once and cached under the
;; COMPILABLE property; a temporary RECURSIVE marker breaks rewrite
;; cycles.  Compound terms are decided per operator, with & and :=
;; compiling their compilable sub-parts in place as a side effect.
(DEFUN CTERM (TE)
  (PROG (OK W)
    (COND ((ATOM TE)
           (SETQ OK (GET TE 'REWRITE:))
           (COND ((NULL OK) (RETURN T)))
           (SETQ W (GET TE 'COMPILABLE))
           (COND (W (RETURN (CAR W))))
           (COND ((GET TE 'RECURSIVE) (RETURN NIL)))
           (PUTPROP TE 'MAYBE 'RECURSIVE)
           (PUTPROP TE (LIST (SETQ OK (CPAT OK))) 'COMPILABLE)
           (REMPROP TE 'RECURSIVE)
           (RETURN OK))
          ((EQ (CAR TE) '!)
           (SETQ OK T)
           (MAPC (*FUNCTION (LAMBDA (P)
                              (SETQ OK (AND OK (CPAT P)))))
                 (CDR TE))
           (RETURN OK))
          ((EQ (CAR TE) '*) (RETURN (CPAT (CDR TE))))
          ((EQ (CAR TE) '&)
           (MAP (FUNCTION (LAMBDA (P)
                            (COND ((AND P (CPAT (CAR P)))
                                   (RPLACA P (MAKEDM (CAR P)))))))
                (CDR TE))
           (RETURN NIL))
          ((EQ (CAR TE) ':=)
           (COND ((CPAT (CDDR TE))
                  (RPLACD (CDR TE) (MAKEDM (CDDR TE)))))
           (RETURN NIL))))) 

;; Distinguished end marker.
(SETQ END '(NIL))

;; Test suite: each entry is (form expected-value).  The forms exercise
;; literal matching, := binding, * repetition, ! alternation, &
;; permutation, and grammar rewriting through the REWRITE: property.
(SETQ EXS
      '(((MATCH '(FOO BAR) '(FOO (:= X BAR))) (T ((X BAR))))
        ((MATCH '(FEE FIE FO FUM) '(FEE (:= Y ?) (:= X ? ?)))
         (T ((Y FIE) (X FO FUM))))
        ((PROGN (REMPROP '<GETINFO> 'COMPILABLE)
                (REMPROP '<GETINFO1> 'COMPILABLE)
                (PUTPROP '<GETINFO>
                         '((! (<GETINFO1>) (TYPE) (PRINT)))
                         'REWRITE:)
                (PUTPROP '<GETINFO1>
                         '((! (GIVE) (SHOW) (TELL)) ME)
                         'REWRITE:)
                (MATCH '(SHOW ME THE DATE OF THE FILE)
                       '(<GETINFO> THE
                                   (:= PROP ?)
                                   (* ?)
                                   (:= OBJ (* ?)))))
         (T ((OBJ OF THE FILE) (PROP DATE))
            ((OBJ THE FILE) (PROP DATE))
            ((OBJ FILE) (PROP DATE))
            ((OBJ) (PROP DATE))))
        ((MATCH '(FEE FIE FO FUM) '(FEE FUM FO FIE)) NIL)
        ((MATCH '(FEE FIE FO FUM) '(FEE (& (FUM) (FO) (FIE))))
         (T))
        ((MATCH '(FEE FIE FO FUM)
                '((! (? FIE FO) (? FIE)) (:= X ? ?)))
         (T ((X FO FUM))))
        ((MATCH '(TELL ME ABOUT THE FOO OF BAR)
                '(<GETINFO> (& ((* ABOUT)) ((* ALL)) ((* THE)))
                            THE
                            (:= X ?)
                            (! ((* @XYZZY@)) (OF) (IN))
                            (:= Y ?)))
         (T ((Y BAR) (X FOO))))
        ((PROGN (REMPROP 'BIGA 'COMPILABLE)
                (PUTPROP 'BIGA '((! (BIGA BIGA) (A))) 'REWRITE:)
                (MATCH '(A A A A A A) '(BIGA)))
         (T))))

;; Run compiled deterministic machine M over input ST.  Whenever the
;; current state is final (CAR M non-NIL) a match ending here is ADDed
;; to ANS; then follow the arc labeled with the next token, or failing
;; that a ? arc, consuming one token per step.
(DEFUN FSM (M ST ANS)
  (PROG (X)
   L    (COND ((CAR M) (ADD (VALUE ST NIL) ANS)))
        (COND ((AND ST
                    (OR (SETQ X (ASSQ (CAR ST) (CDR M)))
                        (SETQ X (ASSQ '? (CDR M)))))
               (SETQ M (CDR X))
               (SETQ ST (CDR ST))
               (GO L))))) 

;; Destructively add X to list SET (EQ membership test) by splicing it
;; onto the tail; SET must be a non-empty list.
(DEFUN INSERTQ (X SET)
  (PROG NIL
   L    (COND ((EQ (CAR SET) X) NIL)
              ((NULL (CDR SET)) (RPLACD SET (LIST X)))
              (T (SETQ SET (CDR SET)) (GO L))))) 

;; Number of times TEST repeats each example form.
(SETQ ITERATIONS 1.)

;; Subset construction: compile pattern RE into a deterministic machine.
;; MAKENDM builds a nondeterministic arc graph; UNIQUESET/ALLSETS name
;; the reachable state sets (accumulated on the special STATESETS); arcs
;; with EQ labels out of a state are merged, ? arcs are folded into the
;; others by SHIFTQS, and TIEUP finally links state names back to state
;; entries.  Returns ($ . start-state), or just the atom for a plain
;; non-rewritable atomic pattern.
(DEFUN MAKEDM (RE)
  (PROG (STATESETS BPOS ARCLIST X)
    (SETQ X (GPAT RE))
    (COND ((AND (ATOM X) (NOT (GET X 'REWRITE:))) (RETURN X)))
    (UNIQUESET (ALLSETS (LIST (MAKENDM (CONS RE 'FINAL)))))
    (DO S
     STATESETS (CDR S)
        (NULL S)
        (SETQ ARCLIST (SHIFTQS (MAPCAN 'SKIPNILS (CAAR S)) NIL))
        (DO APOS
         ARCLIST (CDR APOS)
            (NULL APOS)
            (RPLACD (CAR APOS) (LIST (CDAR APOS)))
            (SETQ BPOS APOS)
         M  (COND ((NULL (CDR BPOS)) (GO END)))
            (COND ((EQ (CAADR BPOS) (CAAR APOS))
                   (INSERTQ (CDADR BPOS) (CDAR APOS))
                   (RPLACD BPOS (CDDR BPOS)))
                  ((EQ (CAADR BPOS) '?)
                   (INSERTQ (CDADR BPOS) (CDAR APOS))
                   (SETQ BPOS (CDR BPOS)))
                  (T (SETQ BPOS (CDR BPOS))))
            (GO M)
         END (RPLACD (CAR APOS) (UNIQUESET (ALLSETS (CDAR APOS)))))
        (RPLACD (CAR S) ARCLIST))
    (RETURN (CONS '$ (CAR (TIEUP STATESETS)))))) 

;; Build a nondeterministic machine -- a list of (label . continuation)
;; arcs -- for ARC = (pattern . continuation).  Atoms with a REWRITE:
;; property are expanded; ! fans out over the alternatives; * builds a
;; cyclic graph by DISPLACEing a placeholder cell; & is routed through
;; PERMUTES; a concatenation threads its tail's machine as continuation.
(DEFUN MAKENDM (ARC)
  (COND ((ATOM (CAR ARC))
         (COND ((GET (CAR ARC) 'REWRITE:)
                (MAKENDM (CONS (GET (CAR ARC) 'REWRITE:) (CDR ARC))))
               (T (LIST ARC))))
        ((EQ (CAAR ARC) '!)
         (MAPCAN 'MAKENDM
                 (MAPCAR (*FUNCTION (LAMBDA (A) (CONS A (CDR ARC))))
                         (CDAR ARC))))
        ((EQ (CAAR ARC) '*)
         (LIST (CONS NIL
                     (PROG (X)
                       (SETQ X (LIST NIL))
                       (DISPLACE X
                                 (CONS (CONS NIL (CDR ARC))
                                       (MAKENDM (CONS (CDAR ARC) X))))
                       (RETURN X)))))
        ((EQ (CAAR ARC) '&)
         (MAKENDM (CONS (CAR (PERMUTES (CDAR ARC))) (CDR ARC))))
        ((NULL (CDAR ARC)) (MAKENDM (CONS (CAAR ARC) (CDR ARC))))
        (T (MAKENDM (CONS (CAAR ARC)
                          (MAKENDM (CONS (CDAR ARC) (CDR ARC)))))))) 

(DEFUN MAKEREADY (X) (SETQ READYQUEUE (CONS X READYQUEUE))) 

;; Top-level entry: match input list ST against pattern PAT.
;; The first call compiles PAT in place, marking it with a leading NIL
;; so later calls skip compilation.  Runs the matcher, drains the ready
;; queue, then collects completed matches (those whose remaining input
;; is NIL).  Returns NIL on failure, or (T binding-alist ...) on success.
(DEFUN MATCH (ST PAT)
  (PROG (V WIN)
    (COND ((CAR PAT)
           (COMPILE PAT)
           (DISPLACE PAT (CONS NIL (CONS (CAR PAT) (CDR PAT))))))
    (SETQ READYQUEUE NIL)
    (CLEARVSETS)
    (SETQ RECURSIONPOSSIBLE NIL)
    (SETQ V (STARTMATCH ST (GPAT (CDR PAT))))
    (RUNREADY)
    (SETQ V
          (MAPCAN (FUNCTION (LAMBDA (X)
                              (COND ((NULL X) NIL)
                                    ((NULL (POS X))
                                     (SETQ WIN 'T)
                                     (COND ((ALS X)
                                            (LIST (ALS X)))
                                           (T NIL)))
                                    (T NIL))))
                  V))
    (COND (WIN (SETQ V (CONS 'T V))))
    (CLEARVSETS)
    (RETURN V))) 

;; Merge two binding a-lists, each kept ordered by MAKNUM of the bound
;; variable (the CAAR), into one ordered a-list.  Non-destructive.
(DEFUN MERGE (AL1 AL2)
  (COND ((NULL AL1) AL2)
        ((NULL AL2) AL1)
        ((LESSP (MAKNUM (CAAR AL1)) (MAKNUM (CAAR AL2)))
         (CONS (CAR AL1) (MERGE (CDR AL1) AL2)))
        (T (CONS (CAR AL2) (MERGE (CDR AL2) AL1))))) 

;; Apply PROC (with extra arguments ENV) to every value already present
;; in value set VS.  At the open marker: if recursion is possible the
;; consumer (PROC . ENV) is suspended on the set so that ADD can resume
;; it when further values arrive; otherwise just stop.
(DEFUN MVSET (VS PROC ENV)
  (PROG NIL
   L    (COND ((CLOSED VS) (RETURN NIL))
              ((WORKING VS)
               (COND (RECURSIONPOSSIBLE
                      (RPLACD VS (CONS (CONS PROC ENV) (CDR VS)))))
               (RETURN NIL)))
        (APPLY PROC (CONS (CAR VS) ENV))
        (SETQ VS (CDR VS))
        (GO L))) 

;; Expand a permutation body L2 into an equivalent alternation: each
;; element in turn, followed by the permutation (&) of the remaining
;; elements.  (The ragged indentation below is line-width damage already
;; present in the original source; the code is preserved verbatim.)
(DEFUN PERMUTES (L2)
  (COND ((NULL (CDR L2)) (CAR L2))
        (T (CONS '!
                 (MAPCAR (*FUNCTION (LAMBDA (I)
                                      (LIST I
                                            (PROG (R)
                                              (SETQ R (REMQ I L2))
                                              (RETURN (COND ((NULL (
                                 CDR R))
                      (CAR R))
                     (T (CONS '& R))))))))
                         L2))))) 

;; Printable form of a (possibly compiled) pattern: compiled $ machines
;; are rendered through PSS, everything else is copied recursively.
(DEFUN PPAT (PAT)
  (COND ((ATOM PAT) PAT)
        ((EQ (CAR PAT) '$) (CONS (CAR PAT) (PSS (CDR PAT))))
        (T (MAPCAR 'PPAT PAT)))) 

;; Serialize a compiled machine's state graph for printing: each state
;; becomes (id final-flag (label . target-id) ...), with ids taken from
;; MAKNUM; newly seen target states are queued onto SS as the MAPCAR
;; walks it.  (Ragged indentation is original; code preserved verbatim.)
(DEFUN PSS (S)
  (PROG (SS)
    (SETQ SS (LIST S))
    (RETURN (MAPCAR (FUNCTION (LAMBDA (PR)
                                (CONS (MAKNUM PR)
                                      (CONS (CAR PR)
                                            (MAPCAR (*FUNCTION (LAMBDA
                                 (PR1)
                          (INSERTQ (CDR PR1) SS)
                          (CONS (CAR PR1) (MAKNUM (CDR PR1)))))
                                                    (CDR PR))))))
                    SS)))) 

;; Copy of L with the first EQ occurrence of I removed.
;; NOTE(review): assumes I is present -- if it is absent the recursion
;; never bottoms out (no NULL check on L).
(DEFUN REMQ (I L)
  (COND ((EQ (CAR L) I) (CDR L))
        (T (CONS (CAR L) (REMQ I (CDR L)))))) 

;; Drain READYQUEUE, applying each queued (function . args) task; a task
;; may enqueue further tasks while running.
(DEFUN RUNREADY NIL
  (PROG (X)
   L    (COND ((NULL READYQUEUE) (RETURN NIL)))
        (SETQ X (CAR READYQUEUE))
        (SETQ READYQUEUE (CDR READYQUEUE))
        (APPLY (CAR X) (CDR X))
        (GO L))) 

;; Reorder arc list L so that all ?-labeled arcs come last (they are
;; accumulated in QL), leaving the other arcs in their original order.
(DEFUN SHIFTQS (L QL)
  (COND ((NULL L) QL)
        ((EQ (CAAR L) '?) (SHIFTQS (CDR L) (CONS (CAR L) QL)))
        (T (CONS (CAR L) (SHIFTQS (CDR L) QL))))) 

;; Collect the non-epsilon arcs of state P (those whose label, the CAR,
;; is non-NIL); an atomic P yields no arcs.
(DEFUN SKIPNILS (P)
  (COND ((ATOM P) NIL)
        (T (MAPCAN (FUNCTION (LAMBDA (P1)
                               (COND ((CAR P1)
                                      (LIST (CONS (CAR P1)
                                                  (CDR P1))))
                                     (T NIL))))
                   P)))) 

;; The elements of list F up to, but not including, the tail L (EQ test).
(DEFUN SPAN (F L)
  (COND ((EQ F L) NIL)
        (T (CONS (CAR F) (SPAN (CDR F) L))))) 

;; Continue a (* pat) repetition: try to match PAT once more starting at
;; V's position...
(DEFUN STAR (V ANS PAT)
  (MVSET (STARTMATCH (POS V) PAT) 'STAR1 (LIST ANS (ALS V)))) 

;; ...and record each further match with the bindings merged in.
(DEFUN STAR1 (V ANS FALS)
  (ADD (VALUE (POS V) (MERGE FALS (ALS V))) ANS)) 

;; Begin matching pattern PAT at input ST; returns a value set.
;; A plain atom (no REWRITE: property) matches a literal token, with ?
;; matching anything.  A rewritable atom memoizes its value set per
;; input tail on its VSET property (the atom is noted on CLEARLIST for
;; cleanup); finding an entry that is already under construction means
;; the grammar recursed, so RECURSIONPOSSIBLE is set to make MVSET
;; suspend consumers rather than miss late-arriving values.
(DEFUN STARTMATCH (ST PAT)
  (PROG (W X V)
    (COND ((ATOM PAT)
           (SETQ W (GET PAT 'REWRITE:))
           (COND ((NULL W)
                  (RETURN (COND ((NULL ST) NIL)
                                ((OR (EQ PAT '?)
                                     (EQ PAT (CAR ST)))
                                 (LIST (VALUE (CDR ST) NIL)))
                                (T NIL)))))
           (SETQ W (GPAT W))
           (SETQ V (GET PAT 'VSET))
           (COND ((NULL V) (SETQ CLEARLIST (CONS PAT CLEARLIST))))
           (SETQ X (ASSQ ST V))
           (COND (X (SETQ RECURSIONPOSSIBLE T) (RETURN (CDR X))))
           (SETQ X (CREATESET))
           (PUTPROP PAT (CONS (CONS ST X) V) 'VSET)
           (COND ((ATOM W)
                  (MVSET (STARTMATCH ST W) 'ADD (LIST X)))
                 (T (CONTINUEMATCH ST W X)))
           (RETURN X)))
    (SETQ V (CREATESET))
    (CONTINUEMATCH ST PAT V)
    (RETURN V))) 

;;; TEST -- timing driver.  The special EXS is a list of
;;; (form expected-value) entries; it is copied with SUBST into EXS1
;;; so any structure smashed by the forms is fresh each run.  Each
;;; form is EVALed ITERATIONS times; the final result must be EQUAL
;;; to the expected value or ERROR is signalled.  Per-entry lap times
;;; are PRINTed, then the total runtime and total GC time.
;;; (RUNTIME / GCTIME units are whatever the host MacLisp reports.)
(DEFUN TEST NIL
  (PROG (BT TT OT)
    (SETQ EXS1 (SUBST NIL NIL EXS))
    (SETQ BT (RUNTIME))
    (SETQ OT BT)
    (SSTATUS GCTIME 0.)
    (DO X
     EXS1 (CDR X)
        (NULL X)
        (COND ((NOT (EQUAL (PROG (A B)
                             (SETQ B (CAAR X))
                             (DO Z
                              0. (1+ Z)
                                 (= Z ITERATIONS)
                                 (SETQ A (EVAL B)))
                             (RETURN A))
                           (CADAR X)))
               (ERROR))
              (T (SETQ TT (RUNTIME))
                 (PRINT (DIFFERENCE TT BT))
                 (SETQ BT TT))))
    ;; NOTE(review): this value of TT is never used again; both final
    ;; figures are recomputed directly below.
    (SETQ TT (STATUS GCTIME))
    (PRINT (DIFFERENCE (RUNTIME) OT))
    (PRINT (STATUS GCTIME)))) 

;;; TIEUP -- destructively finish the state table SS, an alist of
;;; (state-info . arc-list) pairs.  Pass 1: RPLACD each arc so its
;;; CDR points at the destination state's own pair in SS (found with
;;; ASSQ), turning the flat table into a linked graph.  Pass 2:
;;; RPLACA each pair with T or NIL according to whether its old
;;; state-info contained the atom FINAL.  Returns SS.
(DEFUN TIEUP (SS)
  (MAPC (*FUNCTION (LAMBDA (PR)
                     (MAPC (*FUNCTION (LAMBDA (ARC)
                                        (RPLACD ARC
                                                (ASSQ (CDR ARC) SS))))
                           (CDR PR))))
        SS)
  (MAPC (*FUNCTION (LAMBDA (PR)
                     (RPLACA PR
                             (COND ((MEMQ 'FINAL (CAR PR)) T)
                                   (T NIL)))))
        SS)
  SS) 

;;; UNIQUESET -- canonicalize the state set S: sort it with CNUM as
;;; the ordering predicate, then look it up in the special STATESETS.
;;; If a stored set matches element-for-element (EQ), return that
;;; stored set, so EQUAL sets end up represented by one EQ object;
;;; otherwise append S destructively (RPLACD) and return it.
(DEFUN UNIQUESET (S)
  (PROG (X Y PLIST)
        (SETQ S (SORT S 'CNUM))
        (COND ((NULL STATESETS)
               (SETQ STATESETS (LIST (LIST S)))
               (RETURN S)))
        (SETQ PLIST STATESETS)
        ;; Outer loop: compare S against each stored set in turn.
   N    (SETQ X S)
        (SETQ Y (CAAR PLIST))
        ;; Inner loop: element-wise EQ comparison of X against Y.
   L    (COND ((AND (NULL X) (NULL Y)) (RETURN (CAAR PLIST)))
              ((NULL X) (GO M))
              ((NULL Y) (GO M))
              ((EQ (CAR X) (CAR Y))
               (SETQ X (CDR X))
               (SETQ Y (CDR Y))
               (GO L))
              (T (GO M)))
        ;; Mismatch: advance; if this was the last stored set, splice
        ;; S onto the end and return it.
   M    (COND ((NULL (CDR PLIST))
               (RPLACD PLIST (LIST (LIST S)))
               (RETURN (CAADR PLIST))))
        (SETQ PLIST (CDR PLIST))
        (GO N))) 

;;; TEST1 -- timing driver for the matcher.  The special TEST-PATTERN
;;; is a list of (pattern-form . subject-forms) entries; each subject
;;; is EVALed and MATCHed against the EVALed pattern, and the subject
;;; form is PRINTed with its lap time.  NT is used free (not bound
;;; here).
;;; NOTE(review): the value RETURNed is the time since the *last* lap
;;; (BT is reset on every inner iteration), not the total elapsed
;;; time -- confirm this is what callers expect.
(DEFUN TEST1 NIL
  (PROG (BT)
    (SETQ BT (RUNTIME))
    (DO C
     TEST-PATTERN (CDR C)
        (NULL C)
        (DO S
          (CDAR C)
          (CDR S)
          (NULL S)
          (MATCH (EVAL (CAR S)) (EVAL (CAAR C)))
          (SETQ NT (RUNTIME))
          (PRINT (LIST (CAR S) (- NT BT)))
          (SETQ BT NT)))
    (RETURN (DIFFERENCE (RUNTIME) BT)))) 

;;; GLITCH -- wrap an atomic X as the two-element list (! X); return
;;; a non-atomic X unchanged.
(DEFUN GLITCH (X)
  (COND ((ATOM X) (LIST '! X))
        (T X))) 

;;; Benchmark environment setup: *RSET NIL turns off MacLisp's extra
;;; runtime error checking, and NOUUO NIL presumably lets compiled
;;; calls be snapped to direct jumps, so the timings reflect full
;;; compiled-code speed.
(SETQ *RSET NIL)

(SETQ NOUUO NIL)



----------------------------------------------------------------

∂09-Nov-82  0853	GBROWN at DEC-MARLBORO 	WHETSTONE BENCHMARK  
Date: 9 Nov 1982 1144-EST
From: GBROWN at DEC-MARLBORO
To: RPG at SU-AI
Subject: WHETSTONE BENCHMARK
Message-ID: <"MS10(2055)+GLXLIB1(1056)" 11870587519.24.431.15614 at DEC-MARLBORO>

Dick,
Here is a single and double precision whetstone program.  Also, I will mail
you an old paper that describes whetstones and contains the source in ALGOL
if you send me an address. Apparently, one of the tricks for doing whetstones
these days is to pull the routine P3 called in the loop N8 in line.
Then determine that it doesn't do anything and evaporate the entire loop.
Since this is the heaviest weighted segment of the program, it makes your
whetstone numbers look very good.  Since our FORTRAN compiler doesn't do
this, we claim it is not in the spirit of the benchmark.

I have been thinking about the discussion we had about the S1 compiler.
I think it was decided that using that type technology would give us
something less than a factor of two in performance.  Did that factor
only apply to the register allocation techniques that S1 uses?  I guess
the real question is, would there be any advantage to us in using the
source tranform part of the compiler as a kind of front end to the SPICE
compiler?

Do you know where or if I can obtain a copy of the "@" listing generator?
I like things that print pretty.
-Gary
!C	WHETS.FOR	09/27/77	TDR
C	...WHICH IS AN IMPROVED VERSION OF
C       WHET1A.FTN        01/22/75     RBG
C       SINGLE-PRECISION VARIANT OF PROGRAM
C
C	THIS PROGRAM IS THE
C       "WHETSTONE INSTRUCTIONS PER SECONDS" MEASURE OF FORTRAN
C       AND CPU PERFORMANCE.
C
C	IT WAS DEVELOPED BY THE BRITISH CENTRAL COMPUTER AGENCY AND
C       OBTAINED BY A ROUNDABOUT MEANS FROM A CUSTOMER WHO RECEIVED
C       A LISTING OF THE SOURCE PROGRAM FROM DG MARKETING.
C
C       REVIEW NOTES:
C       - THE MODULES RUN THREE TIMES (ILOOP=1,3) WITH I=ILOOP*100;
C         THE FINAL RATING USES ONLY TIMES(3)-TIMES(2), SO CONSTANT
C         STARTUP OVERHEAD CANCELS OUT.
C       - N1 AND N10 ARE ZERO (AND N5, N12 ARE UNUSED), SO THOSE
C         MODULES ARE SKIPPED BY THE ARITHMETIC IFS.
C       - THE INNER DO LOOPS REUSE I AS THEIR INDEX; ALL THE Nn
C         COUNTS ARE COMPUTED FIRST, SO THIS IS HARMLESS.
C       - TIMRL IS CALLED TO CAPTURE PER-LOOP TIMES, BUT THE TIMER
C         PACKAGE AT THE END OF THIS FILE DEFINES ONLY TIMRB/TIMRE;
C         TIMRL IS PRESUMABLY SUPPLIED ELSEWHERE -- CONFIRM BEFORE
C         BUILDING.
C
	DIMENSION TIMES(3)
C
C       COMMON WHICH REFERENCES LOGICAL UNIT ASSIGNMENTS
C
C	this version outputs to terminal 	
C        COMMON /LUNS/ ICRD,ILPT,IKBD,ITTY
C
	COMMON T,T1,T2,E1(4),J,K,L
	T=0.499975E00
	T1=0.50025E00
	T2=2.0E00
	CALL HEADER('for benchmark - whets',17)
C	WRITE(ITTY,1)
C	READ(IKBD,2) I
1       FORMAT(' TYPE LOOP COUNT (I4 FORMAT)'/)
2       FORMAT(I4)
C
C
C
C       ***** BEGININNING OF TIMED INTERVAL *****
	DO 200 ILOOP = 1,3
	I = ILOOP*100
C	INITIALIZE TIMER
	CALL TIMRB
C       *****                               *****
C
C       MODULE LOOP COUNTS: WHETSTONE MODULE WEIGHTS SCALED BY I
	N1=0
	N2=12*I
	N3=14*I
	N4=345*I
	N5=0
	N6=210*I
	N7=32*I
	N8=899*I
	N9=616*I
	N10=0
	N11=93*I
	N12=0
	X1=1.0E0
	X2=-1.0E0
	X3=-1.0E0
	X4=-1.0E0
	IF(N1)19,19,11
 11	DO 18 I=1,N1,1
	X1=(X1+X2+X3-X4)*T
	X2=(X1+X2-X3+X4)*T
	X4=(-X1+X2+X3+X4)*T
	X3=(X1-X2+X3+X4)*T
 18	CONTINUE
 19	CONTINUE
	CALL POUT(N1,N1,N1,X1,X2,X3,X4)
	E1(1)=1.0E0
	E1(2)=-1.0E0
	E1(3)=-1.0E0
	E1(4)=-1.0E0
	IF(N2)29,29,21
 21	DO 28 I=1,N2,1
	E1(1)=(E1(1)+E1(2)+E1(3)-E1(4))*T
	E1(2)=(E1(1)+E1(2)-E1(3)+E1(4))*T
	E1(3)=(E1(1)-E1(2)+E1(3)+E1(4))*T
	E1(4)=(-E1(1)+E1(2)+E1(3)+E1(4))*T
 28	CONTINUE
 29	CONTINUE
	CALL POUT(N2,N3,N2,E1(1),E1(2),E1(3),E1(4))
	IF(N3)39,39,31
 31	DO 38 I=1,N3,1
 38	CALL PA(E1)
 39	CONTINUE
	CALL POUT(N3,N2,N2,E1(1),E1(2),E1(3),E1(4))
	J=1
	IF(N4)49,49,41
 41	DO 48 I=1,N4,1
	IF(J-1)43,42,43
 42	J=2
	GOTO44
 43	J=3
 44	IF(J-2)46,46,45
 45	J=0
	GOTO47
 46	J=1
 47	IF(J-1)411,412,412
 411	J=1
	GOTO48
 412	J=0
 48	CONTINUE
 49	CONTINUE
	CALL POUT(N4,J,J,X1,X2,X3,X4)
	J=1
	K=2
	L=3
	IF(N6)69,69,61
 61	DO 68 I=1,N6,1
	J=J*(K-J)*(L-K)
	K=L*K-(L-J)*K
	L=(L-K)*(K+J)
	E1(L-1)=J+K+L
	E1(K-1)=J*K*L
 68	CONTINUE
 69	CONTINUE
	CALL POUT(N6,J,K,E1(1),E1(2),E1(3),E1(4))
	X=0.5E0
	Y=0.5E0
	IF(N7)79,79,71
 71	DO 78 I=1,N7,1
	X=T*ATAN(T2*SIN(X)*COS(X)/(COS(X+Y)+COS(X-Y)-1.0E0))
	Y=T*ATAN(T2*SIN(Y)*COS(Y)/(COS(X+Y)+COS(X-Y)-1.0E0))
 78	CONTINUE
 79	CONTINUE
	CALL POUT(N7,J,K,X,X,Y,Y)
	X=1.0E0
	Y=1.0E0
	Z=1.0E0
	IF(N8)89,89,81
 81	DO 88 I=1,N8,1
 88	CALL P3(X,Y,Z)
 89	CONTINUE
	CALL POUT(N8,J,K,X,Y,Z,Z)
	J=1
	K=2
	L=3
	E1(1)=1.0E0
	E1(2)=2.0E0
	E1(3)=3.0E0
	IF(N9)99,99,91
 91	DO 98 I=1,N9,1
 98	CALL P0
 99	CONTINUE
	CALL POUT(N9,J,K,E1(1),E1(2),E1(3),E1(4))
	J=2
	K=3
	IF(N10)109,109,101
 101	DO 108 I=1,N10,1
	J=J+K
	K=J+K
	J=J-K
	K=K-J-J
 108	CONTINUE
 109	CONTINUE
	CALL POUT(N10,J,K,X1,X2,X3,X4)
	X=0.75E0
	IF(N11)119,119,111
 111	DO 118 I=1,N11,1
 118	X=SQRT(EXP(ALOG(X)/T1))
 119	CONTINUE
	CALL POUT(N11,J,K,X,X,X,X)
C
C       ***** END OF TIMED INTERVAL         *****
200	CALL TIMRL(TIMES(ILOOP))
C
C	WHET. IPS = 1000/(TIME FOR 10 ITERATIONS OF PROGRAM)
	WHETS = 10000./(TIMES(3)-TIMES(2))
	WRITE (6,201) WHETS
201	FORMAT(' SPEED IS: ',F8.0,' THOUSAND WHETSTONE',
	2 ' SINGLE PRECISION INSTRUCTIONS PER CPUSECOND')
C       *****                              *****
C
	END
	SUBROUTINE PA(E)
C
C       WHETSTONE MODULE: SIX PASSES (J=0..5) OF THE FOUR
C       ARRAY-ELEMENT ASSIGNMENTS OVER E(1)..E(4), USING T AND T2
C       FROM BLANK COMMON.  THE FIXED COUNT IN IF(J-6) KEEPS THE
C       WORK PER CALL CONSTANT.
C
	COMMON T,T1,T2
	DIMENSION E(4)
	J=0
 1	E(1)=(E(1)+E(2)+E(3)-E(4))*T
 	E(2)=(E(1)+E(2)-E(3)+E(4))*T
 	E(3)=(E(1)-E(2)+E(3)+E(4))*T
 	E(4)=(-E(1)+E(2)+E(3)+E(4))/T2
	J=J+1
	IF(J-6)1,2,2
 2	CONTINUE
	RETURN
	END
	SUBROUTINE P0
C
C       WHETSTONE MODULE: THREE SUBSCRIPTED ASSIGNMENTS ON E1 USING
C       THE GLOBAL INDICES J,K,L FROM BLANK COMMON (THE MAIN PROGRAM
C       SETS J,K,L TO 1,2,3 BEFORE THE N9 LOOP).
C
	COMMON T,T1,T2,E1(4),J,K,L
	E1(J)=E1(K)
	E1(K)=E1(L)
	E1(L)=E1(J)
	RETURN
	END
	SUBROUTINE P3(X,Y,Z)
C
C       WHETSTONE MODULE: SIMPLE ARITHMETIC ON LOCAL COPIES OF X AND
C       Y; ONLY Z IS WRITTEN BACK.  NOTE(REVIEW): BECAUSE THE CALLER
C       NEVER INSPECTS Z INSIDE THE N8 LOOP, AGGRESSIVE COMPILERS CAN
C       INLINE THIS AND DELETE THE LOOP, INFLATING THE RATING -- THE
C       COVERING MESSAGE IN THIS ARCHIVE DISCUSSES EXACTLY THAT.
C
	COMMON T,T1,T2
	X1=X
	Y1=Y
	X1=T*(X1+Y1)
	Y1=T*(X1+Y1)
	Z=(X1+Y1)/T2
	RETURN
	END
	SUBROUTINE POUT(N,J,K,X1,X2,X3,X4)
C
C       WRITE STATEMENT COMMENTED OUT TO IMPROVE REPEATABILITY OF TIMINGS
C       (THE CALL ITSELF IS KEPT, PRESUMABLY SO THE MODULE RESULTS
C       STILL APPEAR "USED" TO THE COMPILER.)
C
C	WRITE(2,1)N,J,K,X1,X2,X3,X4
 1	FORMAT(1H,3I7,4E12.4)
	RETURN
	END

!C	WHETD.FOR	09/27/77	TDR
C	...WHICH IS AN IMPROVED VERSION OF:
C       WHET2A.FTN        01/22/75     RBG
C       DOUBLE-PRECISION VARIANT OF PROGRAM
C
C	THIS PROGRAM IS THE
C       "WHETSTONE INSTRUCTIONS PER SECONDS" MEASURE OF FORTRAN
C       AND CPU PERFORMANCE.
C
C	IT WAS DEVELOPED BY THE BRITISH CENTRAL COMPUTER AGENCY AND
C       OBTAINED BY A ROUNDABOUT MEANS FROM A CUSTOMER WHO RECEIVED
C       A LISTING OF THE SOURCE PROGRAM FROM DG MARKETING.
C
C       REVIEW NOTES: IDENTICAL IN STRUCTURE TO WHETS.FOR ABOVE,
C       WITH D-EXPONENT CONSTANTS AND THE DOUBLE-PRECISION
C       INTRINSICS (DATAN/DSIN/DCOS/DSQRT/DEXP/DLOG).  AS ABOVE:
C       N1 AND N10 ARE ZERO SO THOSE MODULES ARE SKIPPED; THE RATING
C       USES ONLY TIMES(3)-TIMES(2); AND TIMRL IS NOT DEFINED IN THE
C       TIMER PACKAGE BELOW -- CONFIRM ITS SOURCE BEFORE BUILDING.
C
	DOUBLE PRECISION X1,X2,X3,X4,X,Y,Z,T,T1,T2,E1
C
	DIMENSION TIMES(3)
C
C       COMMON WHICH REFERENCES LOGICAL UNIT ASSIGNMENTS
C
C	this version does output to logical unit 6
C        COMMON /LUNS/ ICRD,ILPT,IKBD,ITTY
C
	COMMON T,T1,T2,E1(4),J,K,L
	T=0.499975D00
	T1=0.50025D00
	T2=2.0D00
C	WRITE(ITTY,1)
C	READ(IKBD,2) I
1       FORMAT(' TYPE LOOP COUNT (I3 FORMAT)'/)
2       FORMAT(I3)
C
	CALL HEADER('for benchmark - whetd',17)
C
C       ***** BEGININNING OF TIMED INTERVAL *****
	DO 200 ILOOP = 1,3
	I = ILOOP*100
	CALL TIMRB
C	*******************************************
C
C       *****                               *****
C
C       MODULE LOOP COUNTS: WHETSTONE MODULE WEIGHTS SCALED BY I
	N1=0
	N2=12*I
	N3=14*I
	N4=345*I
	N5=0
	N6=210*I
	N7=32*I
	N8=899*I
	N9=616*I
	N10=0
	N11=93*I
	N12=0
	X1=1.0D0
	X2=-1.0D0
	X3=-1.0D0
	X4=-1.0D0
	IF(N1)19,19,11
 11	DO 18 I=1,N1,1
	X1=(X1+X2+X3-X4)*T
	X2=(X1+X2-X3+X4)*T
	X4=(-X1+X2+X3+X4)*T
	X3=(X1-X2+X3+X4)*T
 18	CONTINUE
 19	CONTINUE
	CALL POUT(N1,N1,N1,X1,X2,X3,X4)
	E1(1)=1.0D0
	E1(2)=-1.0D0
	E1(3)=-1.0D0
	E1(4)=-1.0D0
	IF(N2)29,29,21
 21	DO 28 I=1,N2,1
	E1(1)=(E1(1)+E1(2)+E1(3)-E1(4))*T
	E1(2)=(E1(1)+E1(2)-E1(3)+E1(4))*T
	E1(3)=(E1(1)-E1(2)+E1(3)+E1(4))*T
	E1(4)=(-E1(1)+E1(2)+E1(3)+E1(4))*T
 28	CONTINUE
 29	CONTINUE
	CALL POUT(N2,N3,N2,E1(1),E1(2),E1(3),E1(4))
	IF(N3)39,39,31
 31	DO 38 I=1,N3,1
 38	CALL PA(E1)
 39	CONTINUE
	CALL POUT(N3,N2,N2,E1(1),E1(2),E1(3),E1(4))
	J=1
	IF(N4)49,49,41
 41	DO 48 I=1,N4,1
	IF(J-1)43,42,43
 42	J=2
	GOTO44
 43	J=3
 44	IF(J-2)46,46,45
 45	J=0
	GOTO47
 46	J=1
 47	IF(J-1)411,412,412
 411	J=1
	GOTO48
 412	J=0
 48	CONTINUE
 49	CONTINUE
	CALL POUT(N4,J,J,X1,X2,X3,X4)
	J=1
	K=2
	L=3
	IF(N6)69,69,61
 61	DO 68 I=1,N6,1
	J=J*(K-J)*(L-K)
	K=L*K-(L-J)*K
	L=(L-K)*(K+J)
	E1(L-1)=J+K+L
	E1(K-1)=J*K*L
 68	CONTINUE
 69	CONTINUE
	CALL POUT(N6,J,K,E1(1),E1(2),E1(3),E1(4))
	X=0.5D0
	Y=0.5D0
	IF(N7)79,79,71
 71	DO 78 I=1,N7,1
	X=T*DATAN(T2*DSIN(X)*DCOS(X)/(DCOS(X+Y)+DCOS(X-Y)-1.0D0))
	Y=T*DATAN(T2*DSIN(Y)*DCOS(Y)/(DCOS(X+Y)+DCOS(X-Y)-1.0D0))
 78	CONTINUE
 79	CONTINUE
	CALL POUT(N7,J,K,X,X,Y,Y)
	X=1.0D0
	Y=1.0D0
	Z=1.0D0
	IF(N8)89,89,81
 81	DO 88 I=1,N8,1
 88	CALL P3(X,Y,Z)
 89	CONTINUE
	CALL POUT(N8,J,K,X,Y,Z,Z)
	J=1
	K=2
	L=3
	E1(1)=1.0D0
	E1(2)=2.0D0
	E1(3)=3.0D0
	IF(N9)99,99,91
 91	DO 98 I=1,N9,1
 98	CALL P0
 99	CONTINUE
	CALL POUT(N9,J,K,E1(1),E1(2),E1(3),E1(4))
	J=2
	K=3
	IF(N10)109,109,101
 101	DO 108 I=1,N10,1
	J=J+K
	K=J+K
	J=J-K
	K=K-J-J
 108	CONTINUE
 109	CONTINUE
	CALL POUT(N10,J,K,X1,X2,X3,X4)
	X=0.75D0
	IF(N11)119,119,111
 111	DO 118 I=1,N11,1
 118	X=DSQRT(DEXP(DLOG(X)/T1))
 119	CONTINUE
	CALL POUT(N11,J,K,X,X,X,X)
C
C       ***** END OF TIMED INTERVAL         *****
200	CALL TIMRL(TIMES(ILOOP))
C
C	WHET. IPS = 1000/(TIME FOR 10 ITERATIONS OF PROGRAM LOOP)
	WHETS = 10000./(TIMES(3)-TIMES(2))
	WRITE (6,201) WHETS
201	FORMAT(' SPEED IS: ',F8.0,' THOUSAND WHETSTONE',
	2  ' DOUBLE PRECISION INSTRUCTIONS PER CPUSECOND')
C
C
	END
	SUBROUTINE PA(E)
C
C       DOUBLE-PRECISION VERSION OF WHETSTONE MODULE PA: SIX PASSES
C       (J=0..5) OF THE FOUR ARRAY-ELEMENT ASSIGNMENTS OVER
C       E(1)..E(4), USING T AND T2 FROM BLANK COMMON.
C
	DOUBLE PRECISION T,T1,T2,E
	COMMON T,T1,T2
	DIMENSION E(4)
	J=0
 1	E(1)=(E(1)+E(2)+E(3)-E(4))*T
 	E(2)=(E(1)+E(2)-E(3)+E(4))*T
 	E(3)=(E(1)-E(2)+E(3)+E(4))*T
 	E(4)=(-E(1)+E(2)+E(3)+E(4))/T2
	J=J+1
	IF(J-6)1,2,2
 2	CONTINUE
	RETURN
	END
	SUBROUTINE P0
C
C       DOUBLE-PRECISION VERSION OF WHETSTONE MODULE P0: THREE
C       SUBSCRIPTED ASSIGNMENTS ON E1 USING THE GLOBAL INDICES J,K,L
C       FROM BLANK COMMON.
C
	DOUBLE PRECISION T,T1,T2,E1
	COMMON T,T1,T2,E1(4),J,K,L
	E1(J)=E1(K)
	E1(K)=E1(L)
	E1(L)=E1(J)
	RETURN
	END
	SUBROUTINE P3(X,Y,Z)
C
C       DOUBLE-PRECISION VERSION OF WHETSTONE MODULE P3: SIMPLE
C       ARITHMETIC ON LOCAL COPIES OF X AND Y; ONLY Z IS WRITTEN
C       BACK.
C
	DOUBLE PRECISION T,T1,T2,X1,Y1,X,Y,Z
	COMMON T,T1,T2
	X1=X
	Y1=Y
	X1=T*(X1+Y1)
	Y1=T*(X1+Y1)
	Z=(X1+Y1)/T2
	RETURN
	END
	SUBROUTINE POUT(N,J,K,X1,X2,X3,X4)
C
C       WRITE STATEMENT COMMENTED OUT TO IMPROVE REPEATABILITY OF TIMINGS
C       (THE CALL ITSELF IS KEPT, PRESUMABLY SO THE MODULE RESULTS
C       STILL APPEAR "USED" TO THE COMPILER.)
C
	DOUBLE PRECISION X1,X2,X3,X4
C	WRITE(2,1)N,J,K,X1,X2,X3,X4
 1	FORMAT(1H,3I7,4E12.4)
	RETURN
	END
!C     TIMER.FTN - FORTRAN IV-PLUS CODING FOR TIMRB,TIMRE
C
C     THIS SUBROUTINE ALSO DEFINES AND DATA-INITIALIZES LOGICAL
C     UNITS WHICH WILL BE USED BY BENCHMARK PROGRAMS
C
C     NOTE(REVIEW): THE WHETSTONE MAINS ABOVE CALL TIMRB AND TIMRL,
C     BUT ONLY TIMRB (WITH ENTRY TIMRE) IS DEFINED HERE; TIMRL MUST
C     BE SUPPLIED ELSEWHERE -- CONFIRM BEFORE BUILDING.  SECNDS IS
C     SYSTEM (CLOCK, NOT CPU) TIME, SO THESE ARE ELAPSED-TIME
C     MEASUREMENTS.  THE START TIME IS KEPT IN COMMON /IEYQQQ/ SO
C     THE TIMRE ENTRY CAN SEE IT.
C
      SUBROUTINE TIMRB
      COMMON /IEYQQQ/ T
C     DEFINE AND INITIALIZE LOGICAL UNIT ASSIGNMENTS:
C         ICRD  - 1    DATA INPUT (IF REQUIRED)
C         ILPT  - 6    LINE PRINTER BULK OUTPUT
C         IKBD  - 5    KEYBOARD INPUT (IF REQUIRED)
C         ITTY  - 5    TTY OUTPUT, INCLUDING ELAPSED TIME INFORMATION
C
      COMMON /LUNS/ ICRD,ILPT,IKBD,ITTY
      DATA          ICRD,ILPT,IKBD,ITTY / 1,6,5,5 /
C
C Secnds returns system time (clock, not cpu) in seconds minus its argument
C
      T = SECNDS(0.0)
      RETURN

      ENTRY TIMRE
      WRITE(ITTY,1) SECNDS(T)
1     FORMAT (' ELAPSED TIME WAS', F8.2, ' SECONDS')
      END
   --------

∂10-Nov-82  0903	Friedland 	D0 memory benchmark
Date: 10 Nov 1982 0114-PST
From: Friedland
Subject: D0 memory benchmark
To:   rindfleisch, masinter@PARC, cschmidt, bach, haken@SUMEX-AIM
cc:   feigenbaum, brown
Remailed-date: 10 Nov 1982 0808-PST
Remailed-from: CSCHMIDT at SUMEX-AIM
Remailed-to: Dolphin-Users at SUMEX-AIM

(Chris, you can forward this to the mailing list if you want),

I began some benchmarks today with some interesting and surprising
results.  I ran a problem which searched through a large address 
space, touching about 200 units in a knowledge base.  All were in
"core", so the test avoided any manual disk reading speed problems.
The test was running WIND in a pretty clean sysout with either 6 or 8
memory boards.  With 8 boards, the test run took 48 seconds the first
time, then 13 to 14 seconds each of the next ten times.  I then did
a logout), removed two boards, and reran the test.  The first run took
62 seconds, then each of the next ten runs took 45-48 seconds.  I then
did a logout) again, put back the two boards, and tried again.  Again,
the first run took 48 seconds and the next ten took 13-14 seconds.

This is obviously only the beginning of some useful tests.   I find it
rather startling that increasing memory size by 33% added 300% in speed.
We will try this test with 7 boards to judge the curve and will also
try a variety of other tasks to see if the results hold.  Subjectively,
the additional two memory boards seem to significantly speed up almost
all of our operations, but I realize that hard numbers are needed to
judge this.  Certainly I wonder how much more memory will continue to
increase performance.  I. e. have we gained as much as we can, or are
there more factors of 2 or 3 to get.

Comments and further suggestions will be appreciated.

Peter
-------

∂28-Nov-82  2251	CL.BOYER at UTEXAS-20 	FRANZ AND REWRITE
Date: Monday, 29 November 1982  00:52-CST
From: CL.BOYER at UTEXAS-20
To:   rpg at su-ai
Subject: FRANZ AND REWRITE

Courtesy of Gordon Novak and Rich Fateman, I have Franz
timings of the REWRITE benchmark.  Below is the current
table.  Full code and transcripts are in
[UTEXAS-20]<CL.BOYER>LISP.COMPARISONS.  Gordon is hoping to
run a PSL test on a SUN. (LISP.CMP[TIM,LSP]).

MACLISP (2060)                                8.5        5.3    13.8
UCILISP (2060, (nouuo nil))                   9.3        4.7    14
INTERLISP (bcompl, blklib, swap, 2060)       11          6      17
ELISP  (2060)                                11          7      18
INTERLISP (bcompl, blklib, noswap, 2060)     11          7.5    18.5
INTERLISP (Dorado, bcompl, blklib)           18         11      29
FRANZ (all localf, VAX-780)                  21         18      39
INTERLISP (bcompl, no blklib, noswap, 2060)  39          7      46
FRANZ (translink=on, VAX-780)                37         17      54
INTERLISP (bcompl, no blklib, swap, 2060)    64          6      70
INTERLISP (VAX-780)                          80          3      83
ZETALISP (LM-2, 1meg, gc-on)          breakdown not available   97
FRANZ VAX-780 (translink=nil)               130         18     148
INTERLISP (Dolphin, 1meg, bcompl, blklib)   132         35     167

∂31-Jan-83  1751	RPG  
Yow, bad programming style for MIN:
(defun baz (x y)
       (prog ()
	     (return (prog1 x 
			    (cond ((> x y) (return y)))))))

(baz 1 2)
(baz 2 1)

∂12-Mar-83  1544	BROOKS%MIT-OZ@MIT-MC 	[George J. Carrette <GJC @ MIT-ML>: 750 floating-point accelerator] 
Received: from USC-ECL by SU-AI with NCP/FTP; 12 Mar 83  15:43:54 PST
Received: from MIT-MC by USC-ECL; Sat 12 Mar 83 06:10:17-PST
Date: 12 Mar 1983 0907-EST
From: BROOKS%MIT-OZ@MIT-MC
Subject: [George J. Carrette <GJC @ MIT-ML>: 750 floating-point accelerator]
To: rpg%sail@USC-ECL

Return-path: <GJC@MIT-ML>
Date: 12 March 1983 03:30 EST
From: George J. Carrette <GJC @ MIT-ML>
Subject: 750 floating-point accelerator
To: robot-facilities @ MIT-OZ
cc: gerard @ MIT-CORWIN

USRD$:[GJC.FORTRAN]TIMETEST.COM indicates that the 750FPA speeds
up the computations about 2 to 3 times.

Timings of a loop such as

(DO ((J 0 (1+ J)))
    ((= J 1000000))
  (SETQ Z (*$ X Y)))

where the fortran compiler has been told to do no optimization (making
J,Z,X,Y all stack references, fixed offset from SP), compiles into
two instructions, and does about 120 thousand of these loops per second.
A 780 with FPA does about 204 thousand. Similar timings are generated
for array-processing code compiled with optimizations, such that
the index calculations take place in registers.

Now, I just tried this stuff out on three more machines, the CADR, the KA,
and the KL, all using lisp. Here is the summary:

Machine Loops/Sec
  780    204K
  750    120K
   KL    108K
   KA     19K
 CADR     18K

The thing to note here is that the fortran compilations were done
without optimization. That is to say, a simple order-of-execution,
depth-first, tree-walk compiler could have generated the same code.
NIL presently has such a compiler, and with the addition of flonum
declarations could generate such code. Therefore, the prediction,

  for number-crunching, NIL on a 750 will beat maclisp on a KL.

-GJC

p.s. figures on the 3600 are best given in private.
-------

∂14-Mar-83  2359	TYSON@SRI-AI 	Lisp timings    
Received: from SU-SCORE by SU-AI with PUP; 14-Mar-83 23:59 PST
Received: from SRI-AI.ARPA by SU-SCORE.ARPA with TCP; Mon 14 Mar 83 23:56:11-PST
Date: Mon 14 Mar 83 23:49:56-PST
From: Mabry Tyson <Tyson%SRI-AI@SU-SCORE.ARPA>
Subject: Lisp timings
To: rpg@SU-AI.ARPA

I don't know if you are still interested in lisp benchmarks anymore.
As you probably know, we have some Symbolics LM-2's and 3600's at
SRI.  I ported the benchmark programs over there and have run them
through it.  I did it partly out of curiosity to see how things
were going on the 3600 as its software and hardware is still being
improved.

Earlier, Symbolics asked us not to run benchmarks on the 3600 we had
as a beta test site.  We now have another 3600 that I used for
the benchmarks.  However, I still feel a little uneasy about
releasing the results.  If anyone needs them (to determine which machine
to buy), you can direct them to me.

Offhand the results show that the 3600 is much better than the LM-2
at function calling, much slower on bignums.  I'm not sure about consing -
I'd have to look more closely at the results.  Someone here ran a couple
of test programs that, amongst the computations, printed stuff to the
screen.  The visual comparison was that it was about the speed of the
Dorado compiled.  Much slower than the Dorado interpreted.  (I should say
that this is second-hand - I didn't see the Dorado test.  The test program
is a real program, not a made-up benchmark.)

I have all (most?) of the mail that was sent to the benchmark people,
including the compilation of all the results on TAK.  Did you compile
any of the other results into a similar table?
-------

∂26-May-83  1134	SCHMIDT@SUMEX-AIM 	extra 1100 memory benchmark    
Received: from SUMEX-AIM by SU-AI with PUP; 26-May-83 11:34 PDT
Date: Thu 26 May 83 09:39:17-PDT
From: Christopher Schmidt <SCHMIDT@SUMEX-AIM>
Subject: extra 1100 memory benchmark
To: Dolphin-Users@SUMEX-AIM

	Here are some data Gordon Foyster at Stanford gathered last November to
study the benefit of adding extra memory to a Dolphin with and without a color
board, when executing a large expert system.  It should be noted that as with
any benchmark, these numbers reflect a particular system running in a
particular environment, in a particular version of Interlisp on a particular
problem, and should not be casually interpreted as applicable to any other
problem.  [In this case the pre-release version of WIND was used.]
	The task used a large amount of compiled code. Most of the non-swap
time was spent in accessing data structures. Times are averaged over 2 or 3
very consistent runs.

   No. memory boards       Color Monitor         Time (sec)
   -----------------       -------------         ----------
           6                    NO                 235
           7                    NO                 180
           8                    NO                 150
           6                    YES                655
           7                    YES                285
           8                    YES                260

	He also gathered some figures on the number of increments by the
counter on the front of the Dolphin for each of the above tasks. These numbers
closely follow the time differences.
-------

∂26-May-83  1620	Masinter.PA%PARC-MAXC.ARPA@SUMEX-AIM 	Relation of real memory to performance in a demand paging
Received: from SUMEX-AIM by SU-AI with PUP; 26-May-83 16:20 PDT
Received: from PARC-MAXC.ARPA by SUMEX-AIM.ARPA with TCP; Thu 26 May 83 15:54:43-PDT
Date: 26 May 83 15:53:56 PDT (Thursday)
From: Masinter.PA@PARC-MAXC.ARPA
Subject: Relation of real memory to performance in a demand paging
 system
In-reply-to: SCHMIDT's message of Thu, 26 May 83 09:39:17 PDT
To: Schmidt@sumex-aim.ARPA
cc: Dolphin-Users@SUMEX-AIM.ARPA

There is a standard set of results on the relationship of performance
and memory size which are at least alluded to in the standard operating
systems texts.

For any given computation run on a demand paging system, there is a
curve which relates compute time to memory size. For very small amounts
of memory, the compute time is very high, because "memory references" in
fact require disk accesses. As the amount of memory increases, compute
time decreases with a curve that looks like 1/n, until finally, the
entire computation runs entirely resident in real memory, at which time
the curve becomes completely flat.

For a given computation, there is a "knee" in the curve which
corresponds to the place where the  compute time begins to dominate
swapping time. The amount of memory which corresponds to the "knee" is
often called the "working set" of the computation. Timesharing systems
try to estimate the working set of the computations in their scheduling
queues, in order to keep the programs which are allowed to run from
spending all/most of their time waiting for the disk (thrashing.)

Each computation has a DIFFERENT curve, although the general shape of
the curve is the same. Depending on the location of the curve, adding
more memory can result in either a substantial performance improvement
(if the amount of memory is much less than the current 'working set'),
or almost no improvement (if the amount of memory is much greater than
the current 'working set'.)

To explain the data in your message, note that on an 1100, the color
display bitmap, when displaying something, comes out of real memory for
the system, and of course is not swapped out. The color bitmap on an
1100 is 640x480x4 bits, or 153K bytes, which is 80% of one of the
(older) 1100 memory "boards". In addition, displaying the color bitmap
on an 1100 uses an additional 35%-40% of machine cycles if you do not
simultaneously turn off the black and white display (which I assume you
did not.)

Larry


∂24-May-83  1344	PW  	Benchmark update    
I've put DIRECTORY and BENCH on the LM-2 (my directory there is PW, of course).
Mark says that if BENCH is on MC, that's all we need to get it to a SYMBOLICS
3600.  He also wants to check out how we meter it, so maybe I'll do the 3600
first.  At any rate, I'll want to poke around a bit before I go over there.
I hope to start generating meaningful numbers next week, but I'll review 
everything with you first.

Most of what I'm doing either requires the LM-2, or I can do it from home.  
Thus, I'm generally not coming to MJH a lot during business hours.  I may even 
see about getting some 3600 time at Symbolics, though I doubt that's possible, 
given their machine-availability crunch.  I'm getting spoiled on Lisp Machines
fast.

∂31-May-83  1117	RPG   	traverse benchmark
 ∂31-May-83  1116	jkf%UCBKIM@Berkeley 	traverse benchmark 
Received: from UDEL-RELAY by SU-AI with TCP/SMTP; 31 May 83  11:16:38 PDT
Date: 31 May 83 11:01:08 PDT (Tue)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Return-Path: <jkf%UCBKIM@Berkeley>
Subject: traverse benchmark
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA13717; 31 May 83 11:01:08 PDT (Tue)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.29)
	id AA11025; 31 May 83 11:00:19 PDT (Tue)
Received: from UCBVAX.ARPA by udel-relay.ARPA ; 31 May 83 14:02:52 EDT (Tue)
Message-Id: <8305311801.13717@UCBKIM.ARPA>
To: rpg@su-ai

 Here is the traverse benchmark.  Two data points are missing.  After two
days of trying to get an unloaded 750 which is up for sufficiently long
to measure it, I gave up.  Perhaps I'll fill it in someday.

780cpu,gc/750cpu,gc [seconds]  

>>> traverse: init-tlimit
					translinks 
			on				off

localf		18.47,11.8/32.98,19.3		18.7,12.73/33.18,20.75

compiled	20.42,11.77/35.45,19.72		29.61.68,12.82/51.23,21.03

interpreted	1059.13,13.58/1758.35,21.22	1068.2,13.88/1780.75,22.05

>>> traverse: timit
					translinks 
			on				off

localf		    82.98/132.63		    83.13/132.6

compiled	    156.21/244.01		    559.58/911.2

interpreted	    5225.58/			    5268.46/


∂01-Jun-83  2314	RPG   	boyer   
 ∂01-Jun-83  2251	jkf%UCBKIM@Berkeley 	boyer    
Received: from UCB-VAX by SU-AI with TCP/SMTP; 1 Jun 83  22:45:09 PDT
Date: 1 Jun 83 22:46:28 PDT (Wed)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Subject: boyer
Message-Id: <8306020546.13350@UCBKIM.ARPA>
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA13350; 1 Jun 83 22:46:28 PDT (Wed)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.31)
	id AA10232; 1 Jun 83 22:45:39 PDT (Wed)
To: rpg@su-ai

(780cpu,gc)/(750cpu,gc)  [seconds]

>>> boyer: test
					translinks 
			on				off

localf	    21.33,50.42/36.18,82.35		21.3,21.32/36.15,35.01

compiled    40.13,50.43/62.38,78.68		137.85,21.25/223.47,34.5

interpreted 1061.02,32.25/1699.95,51.77		1058.88,22.47/1685.92,36.73


∂06-Jun-83  0956	RPG   	Relation of real memory to performance in a demand paging 
 ∂26-May-83  1620	Masinter.PA%PARC-MAXC.ARPA@SUMEX-AIM 	Relation of real memory to performance in a demand paging
Received: from SUMEX-AIM by SU-AI with PUP; 26-May-83 16:20 PDT
Received: from PARC-MAXC.ARPA by SUMEX-AIM.ARPA with TCP; Thu 26 May 83 15:54:43-PDT
Date: 26 May 83 15:53:56 PDT (Thursday)
From: Masinter.PA@PARC-MAXC.ARPA
Subject: Relation of real memory to performance in a demand paging
 system
In-reply-to: SCHMIDT's message of Thu, 26 May 83 09:39:17 PDT
To: Schmidt@sumex-aim.ARPA
cc: Dolphin-Users@SUMEX-AIM.ARPA

There is a standard set of results on the relationship of performance
and memory size which are at least alluded to in the standard operating
systems texts.

For any given computation run on a demand paging system, there is a
curve which relates compute time to memory size. For very small amounts
of memory, the compute time is very high, because "memory references" in
fact require disk accesses. As the amount of memory increases, compute
time decreases with a curve that looks like 1/n, until finally, the
entire computation runs entirely resident in real memory, at which time
the curve becomes completely flat.

For a given computation, there is a "knee" in the curve which
corresponds to the place where the  compute time begins to dominate
swapping time. The amount of memory which corresponds to the "knee" is
often called the "working set" of the computation. Timesharing systems
try to estimate the working set of the computations in their scheduling
queues, in order to keep the programs which are allowed to run from
spending all/most of their time waiting for the disk (thrashing.)

Each computation has a DIFFERENT curve, although the general shape of
the curve is the same. Depending on the location of the curve, adding
more memory can result in either a substantial performance improvement
(if the amount of memory is much less than the current 'working set'),
or almost no improvement (if the amount of memory is much greater than
the current 'working set'.)

To explain the data in your message, note that on an 1100, the color
display bitmap, when displaying something, comes out of real memory for
the system, and of course is not swapped out. The color bitmap on an
1100 is 640x480x4 bits, or 153K bytes, which is 80% of one of the
(older) 1100 memory "boards". In addition, displaying the color bitmap
on an 1100 uses an additional 35%-40% of machine cycles if you do not
simultaneously turn off the black and white display (which I assume you
did not.)

Larry


∂06-Jun-83  0958	RPG   	Re:  Franz on the Sun  
 ∂28-May-83  0948	ROD   	Re:  Franz on the Sun  
 ∂27-May-83  1956	mike%Rice.Rice@Rand-Relay 	Re:  Franz on the Sun  
Received: from UDEL-RELAY by SU-AI with TCP/SMTP; 27 May 83  19:55:52 PDT
Date:     Fri, 27 May 83 18:18:47 CDT
From: Mike.Caplinger <mike.rice@Rand-Relay>
Return-Path: <mike%Rice.Rice@Rand-Relay>
Subject:  Re:  Franz on the Sun
Received: from rand-relay.ARPA by UCBVAX.ARPA (3.341/3.29)
	id AA19088; 27 May 83 19:50:15 PDT (Fri)
Received: from UCBVAX.ARPA by UCBKIM.ARPA (3.340/3.5)
	id AA02221; 27 May 83 19:51:33 PDT (Fri)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.29)
	id AA19103; 27 May 83 19:51:37 PDT (Fri)
Received: from UCBVAX.ARPA by udel-relay.ARPA ; 27 May 83 22:55:55 EDT (Fri)
Message-Id:  <1983.05.27.18.18.47.150.08942@dione.rice>
To: franz-friends@Berkeley
In-Reply-To: baden%UCBKIM's message of 25 May 83 13:32:01 PDT (Wed)
Via:  Rice; 27 May 83 19:14-PDT

As I type I'm bringing up the 68K version of Opus 38 (now FTPable from
UCB-VAX) on a SUN running 4.1c.  There don't seem to be any major
problems so far, but the compiler doesn't run on a system with all the
net servers on it because it runs out of memory.  I've been told this
is because there's a bug in 4.1c that forces it to only use 1/2 of the
swap partition.  I'm having to run standalone to compile the compiler;
I don't yet know whether I'll be able to compile other stuff without
this rather extreme fix.

As I use the system more I will post more info to this group.

∂06-Jun-83  1011	RPG  
 ∂29-May-83  2113	RJF@MIT-MC
Received: from MIT-MC by SU-AI with TCP/SMTP; 29 May 83  21:13:01 PDT
Date: 30 May 1983 00:13 EDT
From: Richard J. Fateman <RJF @ MIT-MC>
To: rpg @ SU-AI

have you gotten any 3600 benchmarks?  I have some that were run
at GE (tak, for example).  Before too long we should have times
for a Lisa running Franz;  You should also be able to get SUN 68010
times.  (claim: between 35 and 65 % of a vax 11.780; typically 55%).

I have gotten some 3600 benchmarks `unofficially' and have a collaborator
getting all my stuff on it. It really depends on whether you hit the
non-microcoded stuff. CONS is the same or slower than an LM-2. Arithmetic
and function-call is many times faster. Seems about 3-4 times faster
overall so far. 
			-rpg-

∂06-Jun-83  1015	RPG   	Re: Benchmarking  
 ∂30-May-83  1214	HEDRICK@RUTGERS.ARPA 	Re: Benchmarking  
Received: from RUTGERS by SU-AI with TCP/SMTP; 30 May 83  12:11:27 PDT
Date: Mon 30 May 83 15:07:05-EDT
From: Mgr DEC-20s/Dir LCSR Comp Facility <HEDRICK@RUTGERS.ARPA>
Subject: Re: Benchmarking  
To: RPG@SU-AI.ARPA
In-Reply-To: Message from "Dick Gabriel <RPG@SU-AI>" of Sun 29 May 83 13:10:00-EDT

Sure.  In my opinion ELISP is the only one worth benchmarking, though we
can do the old R/UCI Lisp if you want.  Common Lisp is not ready to be
benchmarked, as it is using runtime support in places where we will open
code.  Since Common Lisp is using Elisp as a base, there is no reason
it shouldn't have the same performance when it is finished.

By the way, our experience suggests that Elisp will lose most benchmarks
but will win in practice.  Unlike most other lisps, there is virtually
nothing that a wizard can do to speed things up in Elisp.  We believe
that we get as good performance as any other PDP-10 Lisp in real
programming, but are hard pressed to prove it in benchmarking contests.
We do know that when UNITS moved from Interlisp to Elisp, it speeded
up dramatically.  The Interlisp implementers complain that UNITS was
not using Interlisp optimally.  This is probably true.  The point is
that we are concerned about what will happen when real users use the
thing for program development, not what happens when the implementors
optimize performance.
-------

;;; Cull from here
∂06-Jun-83  1031	RPG   	Symbolics 3600 benchmarks   
 ∂30-May-83  1532	fateman%UCBKIM@Berkeley 	Symbolics 3600 benchmarks
Received: from UDEL-RELAY by SU-AI with TCP/SMTP; 30 May 83  15:31:23 PDT
Date: 30 May 83 15:28:18 PDT (Mon)
From: fateman%UCBKIM@Berkeley (Richard Fateman)
Return-Path: <fateman%UCBKIM@Berkeley>
Subject: Symbolics 3600 benchmarks
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA01405; 30 May 83 15:28:18 PDT (Mon)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.29)
	id AA01860; 30 May 83 15:27:17 PDT (Mon)
Received: from UCBVAX.ARPA by udel-relay.ARPA ; 30 May 83 18:29:52 EDT (Mon)
Message-Id: <8305302228.1405@UCBKIM.ARPA>
To: jkf%UCBKIM@Berkeley, rpg@su-ai, sklower%UCBKIM@Berkeley
Cc: fateman%UCBKIM@Berkeley

Times in seconds
(from GE)
(TAK 18 12 6)		VAX 780 Franz	SUN Franz	LM-2 	3600
interpreted		64		152		327	193
compiled		9.1		17.8		3.2	0.6
compiled, translink	2.1		4.8
compiled, localf	1.1

(Queens 5) [five queens problem]
int			80				239
compiled		33		49.6		2.7	2.0
compiled, translink	10.5		17.6
com, localf		 9.9

(from GE report, "Lisp Programming Environments" by Art Duncan and others).
I do not have the full report at this time.

∂06-Jun-83  1035	RPG   	traverse benchmark
 ∂31-May-83  1116	jkf%UCBKIM@Berkeley 	traverse benchmark 
Received: from UDEL-RELAY by SU-AI with TCP/SMTP; 31 May 83  11:16:38 PDT
Date: 31 May 83 11:01:08 PDT (Tue)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Return-Path: <jkf%UCBKIM@Berkeley>
Subject: traverse benchmark
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA13717; 31 May 83 11:01:08 PDT (Tue)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.29)
	id AA11025; 31 May 83 11:00:19 PDT (Tue)
Received: from UCBVAX.ARPA by udel-relay.ARPA ; 31 May 83 14:02:52 EDT (Tue)
Message-Id: <8305311801.13717@UCBKIM.ARPA>
To: rpg@su-ai

 Here is the traverse benchmark.  Two data points are missing.  After two
days of trying to get an unloaded 750 which is up for sufficiently long
to measure it, I gave up.  Perhaps I'll fill it in someday.

780cpu,gc/750cpu,gc [seconds]  

>>> traverse: init-tlimit
					translinks 
			on				off

localf		18.47,11.8/32.98,19.3		18.7,12.73/33.18,20.75

compiled	20.42,11.77/35.45,19.72		29.61.68,12.82/51.23,21.03

interpreted	1059.13,13.58/1758.35,21.22	1068.2,13.88/1780.75,22.05

>>> traverse: timit
					translinks 
			on				off

localf		    82.98/132.63		    83.13/132.6

compiled	    156.21/244.01		    559.58/911.2

interpreted	    5225.58/			    5268.46/


∂06-Jun-83  1039	RPG   	boyer   
 ∂01-Jun-83  2251	jkf%UCBKIM@Berkeley 	boyer    
Received: from UCB-VAX by SU-AI with TCP/SMTP; 1 Jun 83  22:45:09 PDT
Date: 1 Jun 83 22:46:28 PDT (Wed)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Subject: boyer
Message-Id: <8306020546.13350@UCBKIM.ARPA>
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA13350; 1 Jun 83 22:46:28 PDT (Wed)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.31)
	id AA10232; 1 Jun 83 22:45:39 PDT (Wed)
To: rpg@su-ai

(780cpu,gc)/(750cpu,gc)  [seconds]

>>> boyer: test
					translinks 
			on				off

localf	    21.33,50.42/36.18,82.35		21.3,21.32/36.15,35.01

compiled    40.13,50.43/62.38,78.68		137.85,21.25/223.47,34.5

interpreted 1061.02,32.25/1699.95,51.77		1058.88,22.47/1685.92,36.73


∂06-Jun-83  1040	RPG   	boyer   
 ∂01-Jun-83  2317	jkf%UCBKIM@Berkeley 	boyer    
Received: from UCB-VAX by SU-AI with TCP/SMTP; 1 Jun 83  23:03:43 PDT
Date: 1 Jun 83 22:46:28 PDT (Wed)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Subject: boyer
Message-Id: <8306020546.13350@UCBKIM.ARPA>
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA13350; 1 Jun 83 22:46:28 PDT (Wed)
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.341/3.31)
	id AA10232; 1 Jun 83 22:45:39 PDT (Wed)
To: rpg@su-ai

(780cpu,gc)/(750cpu,gc)  [seconds]

>>> boyer: test
					translinks 
			on				off

localf	    21.33,50.42/36.18,82.35		21.3,21.32/36.15,35.01

compiled    40.13,50.43/62.38,78.68		137.85,21.25/223.47,34.5

interpreted 1061.02,32.25/1699.95,51.77		1058.88,22.47/1685.92,36.73


∂16-Jun-83  0945	RPG  
 ∂13-Jun-83  1857	PW  	MacLisp compatibility, HA!    
I worked over the weekend to get some reasonable machine time at Symbolics.
I've compiled and run everything, though some of the benches appear to still
be broken (eg. PUZZLE prints out "error" and "failure" on the LispM, but 
doesn't here). 

Probably a good way to proceed is to run and release the benches incrementally, 
rather than as a large group.  This involves:

	1) Meeting with you to make sure I know what's being tested (and in some
	   cases, what's being computed).

	2) Making sure the MacLisp compatible version is running properly by 
	   comparing it with a SAIL run.  Bench both the LM-2 and 3600.

	3) Translate to Common Lisp, making sure we haven't distorted the test.
	   Bench both the LM-2 and 3600.

	4) Make the results available.

In the CL version, I should comment things up, and make the programming style
consistent with Laser Edition recommendations.  I could also make some macros to 
deal with ZETALISP/CL conflicts.

If you have any kind of priority for these things, sort the following list and
mail it back to me.

	SCCPP
	TAK
	FRPOLY
	TAKL
	TAKR
	PUZZLE
	DERIV
	DDERIV
	FDDERIV
	FFT
	BROWSE
	TRAVERSE
	DIV2
	FPRINT
	FREAD
	TPRINT
	DESTRU
	TRIANG
	BOYER


Ciao.

Benchmarks
I am now preparing a set of statistics for each of these benchmarks.
Do them in any order. I can help you see if they are working for
any specific ones you're having trouble with. I have no preference
on order. Come talk to me tomorrow, if you want.
			-rpg-

∂16-Jun-83  0945	RPG   	[fateman%UCBKIM@Berkeley (Richard Fateman): "macsyma on a chip?"]   
 ∂14-Jun-83  1335	@MIT-MC:BROOKS%MIT-OZ%MIT-MC@SU-DSN 	[fateman%UCBKIM@Berkeley (Richard Fateman): "macsyma on a chip?"]   
Received: from MIT-MC by SU-AI with TCP/SMTP; 14 Jun 83  13:34:22 PDT
Date: 14 Jun 1983 1628-EDT
From: BROOKS%MIT-OZ%MIT-MC@SU-DSN
Subject: [fateman%UCBKIM@Berkeley (Richard Fateman): "macsyma on a chip?"]
To: rpg@SU-AI

Return-path: <fateman%UCBKIM@UCB-VAX>
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.346/3.33)
	id AA13792; 14 Jun 83 11:48:57 PDT (Tue)
Date: 14 Jun 83 11:48:32 PDT (Tue)
From: fateman%UCBKIM@Berkeley (Richard Fateman)
Subject: "macsyma on a chip?"
Message-Id: <8306141848.6756@UCBKIM.ARPA>
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA06756; 14 Jun 83 11:48:32 PDT (Tue)
To: macsyma-i@mit-mc
Cc: franz-friends%UCBKIM@Berkeley

Well, sort of. We now have Macsyma running on a Motorola 68000 - based
system with 6 megabytes of real memory. The operating system is a 
Unisoft UNIX system, which has been ported to some large number (>65) boxes.  
The Pixel people were kind enough to lend us a machine with enough 
real memory to make virtual memory unnecessary.

It takes a long time to load up, but once running, it is quite responsive,
and appears to be about 60% of a VAX 11/780 in terms of CPU time. 

We have not shaken down everything, but since the source code is unchanged
from the VAX, we expect the bugs to be limited to lisp compilation
glitches, or differences between versions of the UNIX system.

-------

∂16-Jun-83  1816	RPG   	Re: Varia    
 ∂16-Jun-83  1813	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Re: Varia        
Received: from MIT-MC by SU-AI with TCP/SMTP; 16 Jun 83  18:13:08 PDT
Date: 16 Jun 1983 2111-EDT
From: Daniel L. Weinreb <DLW%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Re: Varia    
To: RPG@SU-AI
cc: moon%SCRC-TENEX%MIT-MC@SU-DSN, jlk%SCRC-TENEX%MIT-MC@SU-DSN,
    mlb%SCRC-TENEX%MIT-MC@SU-DSN
In-Reply-To: The message of 16 Jun 83  1449 PDT from Dick Gabriel <RPG@SU-AI>

Regarding Portable Common Lisp, your understanding is basically correct.
Moon and I think that a Portable Common Lisp would be a great thing, and
we'd love to see it happen, but here at Symbolics we find we are already
quite overcommitted in the number of technical challenges we have taken on.
We are suffering a lot of internal problems (shortage of internal
computer resources, space, money, etc) because we are spread so thin and doing
so many things.  We find that we have got to learn how to say "no" to
a potentially fun new project, because we really have to
concentrate our resources on Lisp machine development.  The result is that
we can't devote resources to PCL for the next 6 to 12 months.  However, we'll
keep in touch, we'll be glad to talk things over, and, of course, we'll
continue to try to make the Lisp Machine itself support the full
Common Lisp standard specification.

Regarding the benchmarking issue, the last time I talked to JLK about this
(John Kulp is the VP of R&D and is the final authority on such things), he
said that in his opinion, since 3600s have been delivered to customer sites,
there is no way that we can stop anybody from running benchmarks on them
and distributing the results, and therefore he doesn't see any reason why
you should need any agreement from us to proceed with benchmarking.
Now, just to make sure I'm not misquoting him, I'll CC him this message and
mail him the message that you sent to us.  Do you have access to
a 3600?  If not, and you need to use the one at SPA, you'd have to
talk to MLB, and that machine is in pretty heavy use.  If you are going to do
substantial benchmarking work, there are people here who would be good
to talk to; Bruce Edwards (BEE), in particular, is concentrating on performance
improvements and things like that; Dave Moon is the expert on the system
architecture; Dan Gerson is the expert on paging and disking, and is going
to start spending time soon working on issues of paging performance.
-------

∂26-Jun-83  1125	RPG  	Timings  
To:   PW@SU-AI, jkf%ucbkim@UCB-VAX, hedrick@RUTGERS
CC:   "#TIMING.MSG[TIM,LSP]"@SU-AI 

Folks, in place of SCCPP, please substitute the following 2 variations on
TAK. One does catch/throw, the other special binding in place of parameter
passing. These are simpler than SCCPP and test more important things.

			-rpg-

;;; Begin CTAK
;;; TAK benchmark variant: uses *CATCH/*THROW to deliver results
;;; instead of ordinary function return, so it measures the cost
;;; of catch/throw rather than plain function calling.
(declare 
 (fixnum (tak fixnum fixnum fixnum)))

;; Entry point: establishes the 'tak catch frame so the *THROW
;; inside TAK1 has somewhere to deliver its value.
(defun tak (x y z)
 (*catch 'tak (tak1 x y z)))

;; Recursive worker: when y >= x, *THROW the answer z to the
;; nearest 'tak catch; otherwise recurse on the three rotated
;; argument triples, each inner call wrapped in its own *CATCH.
(defun tak1 (x y z)
       (cond ((not (< y x))	;x≤y
	      (*throw 'tak z))
	     (t (tak1
		 (*catch 'tak
			 (tak1 (1- x)
			       y
			       z))
		 (*catch 'tak
			 (tak1 (1- y)
			       z
			       x))
		 (*catch 'tak
			 (tak1 (1- z)
			       x
			       y))))))

;; Time the standard (TAK 18 12 6) call with the shared timer harness.
(include "timer.lsp")
(timer timit (tak 18. 12. 6.))
;;; End CTAK

;;; Begin STAK
;;; TAK benchmark variant: passes arguments through SPECIAL
;;; variables X, Y, Z rather than as ordinary parameters, so it
;;; measures the cost of special-variable binding.
(declare 
 (fixnum (tak fixnum fixnum fixnum))
 (fixnum (stak))
 (special x y z)
 (fixnum x y z))

;; Entry point: binding the parameters x, y, z (declared SPECIAL
;; above) makes them visible to the zero-argument worker STAK.
(defun tak (x y z)
  (stak))

;; Worker: reads the current special values of X, Y, Z.  Each outer
;; LET clause rebinds the specials to one rotated triple (the inner
;; LETs bind in parallel, capturing the old values) around a
;; recursive STAK call, then STAK is called once more on the three
;; results.
(defun stak ()
       (cond ((not (< y x))	;x≤y
	      z)
	     (t (let ((x (let ((x (1- x))
			       (y y)
			       (z z))
			      (stak)))
		      (y (let ((x (1- y))
			       (y z)
			       (z x))
			      (stak)))
		      (z (let ((x (1- z))
			       (y x)
			       (z y))
			      (stak))))
		     (stak)))))

;; Time the standard (TAK 18 12 6) call with the shared timer harness.
(include "timer.lsp")
(timer timit (tak 18. 12. 6.))
;;; End STAK

∂27-Jun-83  0934	RPG   	Re: BenchMarking  
 ∂27-Jun-83  0659	HEDRICK@RUTGERS.ARPA 	Re: BenchMarking  
Received: from RUTGERS by SU-AI with TCP/SMTP; 27 Jun 83  06:59:17 PDT
Date: 27 Jun 83 10:01:37 EDT
From: Dir LCSR Comp Facility <HEDRICK@RUTGERS.ARPA>
Subject: Re: BenchMarking  
To: RPG@SU-AI.ARPA
In-Reply-To: Message from "Dick Gabriel <RPG@SU-AI>" of 27 Jun 83 01:50:00 EDT

We are in the process of finishing Common Lisp and installing a new 20.
I will do it when I can.
-------

∂29-Jun-83  1037	RPG   	Benchmarking 
 ∂22-Jun-83  2038	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Benchmarking     
Received: from MIT-MC by SU-AI with TCP/SMTP; 22 Jun 83  20:38:38 PDT
Received: from SCRC-BORZOI by SCRC-TENEX with CHAOS; Wed 22-Jun-83 20:19:18-EDT
Date: Wednesday, 22 June 1983, 20:27-EDT
From: Daniel L. Weinreb <DLW%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Benchmarking 
To: RPG@SU-AI
In-reply-to: The message of 22 Jun 83 17:27-EDT from Dick Gabriel <RPG at SU-AI>

Gee, maybe a piece of mail got lost; I never heard that Paul Wieneke was
running the benchmarks.  Well, that's good to hear; I forwarded the
message to relevant people.  And congratulations regarding the money!

∂29-Jun-83  1038	RPG   	Benchmarking 
 ∂22-Jun-83  1342	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Benchmarking
Received: from MIT-MC by SU-AI with TCP/SMTP; 22 Jun 83  13:42:22 PDT
Received: from SCRC-BORZOI by SCRC-TENEX with CHAOS; Wed 22-Jun-83 15:56:56-EDT
Date: Wednesday, 22 June 1983, 15:55-EDT
From: Daniel L. Weinreb <DLW%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Benchmarking
To: rpg@SU-AI
Cc: moon%SCRC-TENEX%MIT-MC@SU-DSN, bee%SCRC-TENEX%MIT-MC@SU-DSN,
    jlk%SCRC-TENEX%MIT-MC@SU-DSN

I never heard from you again about benchmarking.  Let me put it this
way.  Hi, Dick.  We want to be benchmarked.  We want to cooperate with
you.  We want to participate in your comparisons.  We hope that this
work will help our own efforts to improve our performance, and we look
forward to benefiting from your experience with tuning Lisp systems.
Tell me what we can do to get started.  RSVP.  Thank you.

Ah, as I thought I mentioned, Paul Wieneke has been over at the Palo Alto
office and has been benchmarking for about 3 weeks now, but MLB was uncertain
how you felt about it. I forwarded your message to PW who showed it to MLB,
who is now happily co-operating (instead of nervously co-operating). As I
understand it, MLB and PW are preparing a bunch of results for you guys to
look at in one swipe. I think they have almost completed my suite of 19
benchmarks. So, what you can do is keep up the good work.

On a related matter, ARPA has ****finally**** sent us some equipment money,
and we are about to order some 3600s. I suppose this means we'll get them
in about 10 years, but better intolerably late than never. I've already
specified that one goes in my office and one goes in my study at home.
Maybe one at my girlfriend's house is a good idea.
			-rpg-

∂29-Jun-83  1049	RPG   	Re: BenchMarking  
 ∂29-Jun-83  1034	jkf%UCBKIM@Berkeley 	Re: BenchMarking   
Received: from UCB-VAX by SU-AI with TCP/SMTP; 29 Jun 83  10:34:04 PDT
Received: from UCBKIM.ARPA by UCBVAX.ARPA (3.346/3.33)
	id AA18706; 29 Jun 83 10:33:30 PDT (Wed)
Date: 29 Jun 83 09:04:48 PDT (Wed)
From: jkf%UCBKIM@Berkeley (John Foderaro)
Subject: Re: BenchMarking  
Message-Id: <8306291604.9580@UCBKIM.ARPA>
Received: by UCBKIM.ARPA (3.340/3.5)
	id AA09580; 29 Jun 83 09:04:48 PDT (Wed)
To: RPG@SU-AI
In-Reply-To: Your message of 26 Jun 83  2250 PDT

 sorry, I've always got things to do that are due yesterday but I will
get those benchmarks done.


I'm just sending friendly reminders. I now have a chart of 3600, Dolphin,
Dorado, LM-2, MacLisp, and Franz times. Very interesting, but numerous
non-disclosure agreements prevent me from saying any more.
			-rpg-

 ∂18-Jul-83  2228	RPG   	effect of compiler optimizations on benchmarks  
To:   "#TIMING.MSG[TIM,LSP]"
 ∂18-Jul-83  2049	Masinter.PA@PARC-MAXC.ARPA 	effect of compiler optimizations on benchmarks 
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 18 Jul 83  20:49:17 PDT
Date: 18 Jul 83 20:49:49 PDT (Monday)
From: Masinter.PA@PARC-MAXC.ARPA
Subject: effect of compiler optimizations on benchmarks
To: RPG@SU-AI.ARPA
cc: Masinter.PA@PARC-MAXC.ARPA

Most of the benchmarks don't reflect the importance of compiler
optimizations on "typical user" code. (I guess this is OK.) If naive
users write cruddy code, the compiler should fix it up for them, yes?
I'd say that most Stanford PhD's in Compiler Science qualify as "naive
users".

Yet which of your benchmarks reflect this to any degree?

Larry


Lousy code: This is a good point. Compilers that improve users' code
are not common, and my own view is that it is undesirable as a first
approximation. Here's why I feel that way, but I'm not saying this is
how it should be. Lisp is an `assembly language.'  It allows you to
express the exact computation you desire. Every optimization shy of
scheduling and peephole optimization is expressible as Lisp code. Since
this isn't true of imperative languages, compilers for those languages must
supply optimizations.

In the S-1 compiler, I view such optimizations as dead code removal, 
substitutions of values for variables etc as useful only for compiler-generated
Lisp code.

Thus, I strive to write exactly the code I mean, and I expect to sacrifice
speed for clarity in places.

I understand that this is not a universal belief.

			-rpg-

------- End undelivered message -------

∂19-Jul-83  1214	@MIT-MC:BEE%SCRC-TENEX%MIT-MC@SU-DSN 	Benchmark results
Received: from MIT-MC by SU-AI with TCP/SMTP; 19 Jul 83  12:14:19 PDT
Received: from SCRC-MUDDY by SCRC-TENEX with CHAOS; Tue 19-Jul-83 15:12:56-EDT
Date: Tuesday, 19 July 1983, 15:11-EDT
From: Bruce Edwards <BEE%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Benchmark results
To: rpg@SU-AI, pw@SU-AI
Cc: DLW%SCRC-TENEX%MIT-MC@SU-DSN, RSL%SCRC-TENEX%MIT-MC@SU-DSN,
    Benson%SCRC-TENEX%MIT-MC@SU-DSN, BEE%SCRC-TENEX%MIT-MC@SU-DSN

Here are the latest benchmark results. These results are not the result
of specific tuning yet, however they are run on the most current system
with the most current microcode. There is still likely to be some
performance improvements shortly, in the area of reducing the paging
time, and also improving the speed of consing and so on. Most of the
speedup was achieved by general system improvement, and by microcoding a
portion of CONS, MEMQ, ASSQ, and GET. These were in the works, however,
they were never quite got around to before. The first times are "Wall
clock" times, and the "Wall clock" paging times are estimated by a
metering tool. I believe that the best comparison of the results would
be the 3600 minus paging time versus the KL10 minus the GC time. In this
case, the paging would be factored out of both systems, and the GC time
is factored out of both because the correct GC is not implemented on the
3600 yet. I never ran these with the GC on, but I guess that would be
possible. I don't know if you can reproduce these numbers exactly on the
3600's at SPA, but if you have problems contact me.

File: POINTER:>BEE>Performance>Benchmarks.text
----------------------------------------------------------------
-*- Mode: Text -*-

Symbolics System, >world2.load
2032K Physical memory, 15000K Swapping space.
 Experimental System    232.139
 Experimental Hardcopy   15.6
 Experimental Zmail      79.9
 Experimental LMFS       33.28
 Tape                    19.3
 Cold load               84
 Microcode TMC5-MIC     108
 FEP                     13
Symbolics Muddy

		Wall clock	Page time (estimate)	Run time
----------------------------------------------------------------
Boyer:		21.5		7.52			14
----------------------------------------------------------------
Browse:		48.6		16.52			32.1
----------------------------------------------------------------
CTAK:		8.84		0			8.84
----------------------------------------------------------------
DDERIV:
FDDERIV:	15.2		.6			14.6
----------------------------------------------------------------
DERIV:		17.5		5.95			11.55
----------------------------------------------------------------
DESTRU:		5.63		1.69			3.94
----------------------------------------------------------------
DIV2:
  (iterative)	8.8		4.0			4.8
  (recursive)	11.2		4.92			6.27
----------------------------------------------------------------
FFT:		4.77		0			4.77
----------------------------------------------------------------
FPRINT:		3.2		.1			3.1
  (this should be about 5% faster in the next system)
----------------------------------------------------------------
FREAD:		6.25		0			6.25
----------------------------------------------------------------
FRPOLY:
 Power=2
   r:		.052
   r2:		.061
   r3:		.053
 Power=5
   r:		.52
   r2:		.69
   r3:		.53
 Power=10
   r:		6.04
   r2:		10.15
   r3:		6.2
 Power=15
   r:		41.2		0			41.2
   r2:		85		9.35			75.65
   r3:		42		1.7			40.3
   (fully 30% of the power 15 time is going into macrocoded bignums. In
    the next system these will be faster, but I cannot accurately
    estimate the speedup. Could be as much as a factor of 2 which would
    make the total times 15% faster, but that is really speculation)
----------------------------------------------------------------
PUZZLE:		14.2		0			14.2
----------------------------------------------------------------
STAK:		2.48		0			2.48
----------------------------------------------------------------
TAK:		.59		0			.59
----------------------------------------------------------------
TAKL:		6.32		0			6.32
----------------------------------------------------------------
TAKR:		.59		0			.59
----------------------------------------------------------------
TPRINT:		7.8		0			7.8
  (69% of this is in character drawing, and probably should be factored
   out for comparison to the KL. In addition another 4% should be saved
   in the next system) Given those estimated speed-ups, we should expect
   numbers like:)
		1.4		0			1.4
----------------------------------------------------------------
TRAVERSE:
  (initialize)	13.5		1.2			12.3
   (traverse)	51.23		0			51.23
----------------------------------------------------------------
TRIANG:		158.1		0			158.1
----------------------------------------------------------------

∂20-Jul-83  0621	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Comparitive benchmarking   
Received: from MIT-MC by SU-AI with TCP/SMTP; 20 Jul 83  06:21:25 PDT
Received: from SCRC-BORZOI by SCRC-TENEX with CHAOS; Wed 20-Jul-83 09:18:53-EDT
Date: Wednesday, 20 July 1983, 09:17-EDT
From: Daniel L. Weinreb <DLW%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Comparitive benchmarking
To: rpg@SU-AI
Cc: bee%SCRC-TENEX%MIT-MC@SU-DSN, mlb%SPA-NIMBUS%MIT-MC@SU-DSN,
    pw%SPA-NIMBUS%MIT-MC@SU-DSN, jlk%SCRC-TENEX%MIT-MC@SU-DSN

I notice that in BEE's comparison chart, the KL-10 numbers he's looking
at appears to be runtime as reported by the operating system.  Of
course, this omits paging overhead, and other operating system overhead,
whereas our own numbers are stopwatch times.  We can accurately tell you
how much of our time is in paging, and subtract that, to make a
comparison that tries to ignore paging.  But such a comparison is not
the only meaningful comparison.  I actually would feel much better about
a comparison of stop-watch times of both systems.  Of course, any final
report you produce should include all numbers; what I'm really saying is
that I think your report would be much more interesting and useful if
you were ALSO able to provide some stop-watch timings of a completely
unloaded KL-10, instead of exclusively providing the rather artificial
numbers returned by the operating system.  Do you think there's any chance
that you could get these numbers?  Thanks.

Dan:
When I time the benchmarks on SAIL I gather the following information:
CPU (EBOX) time, GC (EBOX) time, WHOLINE time (this is EBOX + MBOX time),
Elapsed time, and the load averages before and after the run.

I have run many of my benchmarks with SAIL down to users. WHOLINE time
seems to correspond to elapsed time in that case. We have very slow main
memory (1.5 mic), but we do not page (nor swap for that matter with 2.5
megawords of memory). Below I've included the transcript of my timing of
TRAVERSE. The first report is the initialize time and the second report is
the traverse time, if you are familiar with the benchmark. The elapsed
time in this case seems to depend on both the load averages and,
consequently, on the reduced cache-hit rate.

It happens that on SAIL there is a simple mathematical relationship
between the EBOX time, the size of the program run, the speed of the
memory, and the Wholine time. Thus I can pretty much predict wholine time
from EBOX time for SAIL. If we paged I think there wouldn't be such a simple
relationship.

My intent is to provide all these numbers in my report. I will state that
the WHOLINE time is the main comparison time to use since it corresponds
to elapsed time on an unloaded KL-10. The charts I use to compare things
for my own benefit use WHOLINE time. I may re-run all of my benchmarks on
SAIL with the cache turned off and the system down to users to bracket the
time.  Then people only need to discover their page-time function to
bracket expected elapsed time.  That is, I may decide to provide the
following data for each benchmark:

Unloaded, cache on, wholine time = t1
Unloaded, cache off, wholine time = t2

Then from all of this data I can derive the following function:

a function of cache-on speed and memory speed, f1, such that  f1 (t1,m) = t2. 
The hope is that this function, derived from SAIL, works for all KL-10s.

Then people who know their page-time function, f2, will know that the
elapsed time will range between:

f2(t1,l) and f2(f1(t1,m),l)	l = load average

Thus, the best they can do is the page-time loss on top of the time that
assumes they are in the cache all the time and the page-time loss on
top of the time that assumes they are never in the cache.

Timing performed on Monday 04/04/83 at 12:51:18.
Cpu (- GC) Time = 6.689
Elapsed Time = 391.633335
Wholine Time = 90.15
GC Time = 45.174
Load Average Before  = 1.34567666
Load Average After   = 5.16910934
Average Load Average = 3.257393
NIL 

Timing performed on Monday 04/04/83 at 12:57:51.
Cpu (- GC) Time = 24.014
Elapsed Time = 241.833334
Wholine Time = 49.2166667
GC Time = 0.0
Load Average Before  = 5.1550678
Load Average After   = 4.9717611
Average Load Average = 5.06341445

∂21-Jul-83  0657	@MIT-MC:DLW%SCRC-TENEX%MIT-MC@SU-DSN 	Comparative Benchmarks     
Received: from MIT-MC by SU-AI with TCP/SMTP; 21 Jul 83  06:56:52 PDT
Received: from SCRC-BORZOI by SCRC-TENEX with CHAOS; Thu 21-Jul-83 09:58:08-EDT
Date: Thursday, 21 July 1983, 09:56-EDT
From: Daniel L. Weinreb <DLW%SCRC-TENEX%MIT-MC@SU-DSN>
Subject: Comparative Benchmarks  
To: RPG@SU-AI
Cc: bee%SCRC-TENEX%MIT-MC@SU-DSN, mlb%SPA-NIMBUS%MIT-MC@SU-DSN, pw@SU-AI,
    jlk%SCRC-TENEX%MIT-MC@SU-DSN, dang%SCRC-TENEX%MIT-MC@SU-DSN,
    dlw%SCRC-TENEX%MIT-MC@SU-DSN
In-reply-to: The message of 20 Jul 83 13:42-EDT from Dick Gabriel <RPG at SU-AI>

I see.  OK, if your experience is that wholine time really does
accurately reflect the wall-clock time for an unloaded KL, then I believe
it.  I'll be interested to see the results in your report that show how
load average affects elapsed time.  I bet some people think it's a
simple multiplier, and I bet it turns out to be much more complicated.

Some of our metering shows that we DO spend time in paging on some of
your test.  The reason is that when new pages are created, you need to
clear out some main memory, and this means finding flushable virtual
pages, some of which are "dirty" and need to be written out.  Our system
makes various attempts to write out "dirty" pages "ahead", underneath
page faults, but it does NOT have a background process that runs around
writing them out.  Thus, if you leave a Lisp Machine alone for hours and
come back to it, there are still dirty pages lying around.  This
certainly wouldn't be true of Multics or Amber; they would use the spare
capacity to write out the pages.  I wouldn't be surprised if WAITS does
this too.  Thus, our performance is actually being adversely affected by
our operating system.

If we ran these benchmarks again, but first ran a special function that
writes out all dirty pages to the disk, then our times might come out
somewhat better for those benchmarks that are spending time in paging
(boyer, browse, deriv, destru, and both div2s spend around 30% in
paging!).  Would you be interested in having this set of numbers to add
to all your other sets of numbers?  (I begin to appreciate the magnitude
of your task; good thing we don't have a cache, isn't it?)

Another set of numbers might be obtainable by letting us do the above
AND run with a machine that has lots of main memory.  (After all, it's a
true fact about the 3600 that its backplane is capable of holding lots
of memory and you buy a loaded one if you are similarly loaded with $.)
I'd just like to create a set of results that don't include paging time,
because it's really true that you don't have to page if you arrange not to.

Thanks.

∂22-Jul-83  0013	MASINTER.PA@PARC-MAXC.ARPA 	various
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 22 Jul 83  00:13:09 PDT
Date: 22 JUL 83 00:12 PDT
From: MASINTER.PA@PARC-MAXC.ARPA
Subject: various
To: RPG@SU-AI.ARPA
cc: masinter.PA@PARC-MAXC.ARPA

Dick, I can't imagine how it could do any harm for you to discuss
Dolphin timings with the purchasing comittee -- after all, Stanford
have a  number of Dolphins and you or others could certainly
run the same tests there.

In addition, there are a number of Stanford folks who regularly
run both on Dolphins and Dorados. I can't see any problem in their
discussing the relative performance of their applications with
the Stanford committee.

On the other hand, I wonder what your role in that should be.
I think your position wrt Xerox and Symbolics is quite tenuous.
This benchmark business is so clearly part of their marketing
strategy that we should avoid taking any actions which might
play into their hand -- they certainly don't have a good record
for being aboveboard.

I've asked to  be let out of this one & have deferred any
decision on release of "benchmark" data to Sheil, who has promised
me some response by Monday.

I know Sheil had a long conversation with Rindfleisch, although
I don't know if the subject of benchmarks came up.

∂22-Jul-83  0358	@MIT-MC:DLW@SCRC-TENEX 	Using our figures for Stanford 
Received: from MIT-MC by SU-AI with TCP/SMTP; 22 Jul 83  03:58:25 PDT
Date: Friday, 22 July 1983  06:54-EDT
From: DLW at SCRC-TENEX
to:   rpg at su-ai
cc:   bee at SCRC-TENEX, jlk at SCRC-TENEX, h at SCRC-TENEX
Subject: Using our figures for Stanford

You are granted official permission to use BEE's latest figures at your
meeting with Stanford.  However, we'd still like you not to generally
release these numbers, since we still have some more cycles to squeeze out
of them.

You might explain to the Stanford folks why we think we can make these
numbers better than they are now.  One reason is that if we had
the temp area GC (not yet implemented, but it will be eventually),
there would be much less page creation, because pages would get
reused.  If we were allowed to write out dirty pages before starting,
that might also help; I mentioned this to you in an earlier message.
This could speed up some of the cons-intensive ones by about 20%.

Some examples are bignum-intensive.  Bignums are currently implemented
in Lisp and have NOT been tuned practically at all!  (BEE is too
busy to do all these things instantly.)  We will certainly be
putting lots of effort into speeding up bignums!

Aside from these cases, the rest of the numbers are probably within
10-20% of the final figures for a 3600 without IFU.  We still have
some compiler optimizers that would help, for example.  Also, do
point out to them that the IFU might make some of these things faster,
although it's hard to say right now.  (The IFU is in wire-wrap and
debugging is in progress at SCRC, by the way.  It will be an extra-cost
option on new machines, although some old machines will be retrofitted
for free since we promised IFUs to early customers when we thought
it would eventually be standard.)

Good luck at your meeting.

∂05-Aug-83  2212	GSB@MIT-ML 	NIL Benchmarks    
Received: from MIT-ML by SU-AI with TCP/SMTP; 5 Aug 83  22:11:45 PDT
Date: 6 August 1983 01:04 EDT
From: Glenn S. Burke <GSB @ MIT-ML>
Subject: NIL Benchmarks
To: rpg @ SU-AI

Here it is...
------------------------------------------
Here's the results of running all of the benchmarks.  All were run in
NIL version 259.9 or thereabouts (some were run in an earlier one,
but i deferred those which mattered until i got that NIL up on my vax).
The vax is a 750, 2 mb of memory, no floating-point accelerator or
extra microcode.  There is no GC, hence no gc-overhead;  however, there
is dynamic expansion of the heap every once in a while, so that shows
up some, mainly in pagefault time.  I did turn off printing of heap-
expansion messages however so there at least are no calls to FORMAT
appearing randomly in the time.

Generally, i tried to first run the benchmark as close to as-is as
possible, and then to fix it to "run in nil" according to the semantics
of how it was written.  Typically this means changing the maclisp
fixnum-only arithmetic functions to be NIL fixnum-only functions (by
adding "&" to the end of the name...).  Array hacking is a bit of a
problem, for although NIL has the basis for common-lisp arrays now, the
compiler does not handle declarations for them and so does nothing
nice.  The current compiler also does not handle type-declarations
for local variables, so, for instance, flonum consing can happen
quite a bit.  (This behavior also defeats the use of flonum arrays
since they have to cons the result to return it.)  So typically I do
most array hacking by turning them into whatever make-array does, and
using aref.  For those which are simple vectors, i also try it using
SGVREF, which open-compiles nicely.  Also in cases where other
operations might have error-checking turned off, i try it that way.  By
default, car, cdr, etc., are compiled as subroutine calls to NIL kernel
subroutines which perform error checking.  This can be turned off
with a compiler switch;  in the next compiler, this will be controlled
by use of INLINE and/or OPTIMIZE declarations.  I believe that there
are no transformations I have made here which are not realizable by
reading the current NIL documentation, and, in fact, i have often NOT
done certain optimizations which would be considered highly NIL
specific (for instance, i use common-lisp REPLACE on the result of
make-list [the coerce function isn't in yet] and a simple vector when i
could have called the function which converts a simple vector to a
list).

We have a tendency towards gratuitous maclisp compatibility;  for
instance, maclisp array usage should work compatibly.  However, some of
that code got lost or damaged when i implemented the common-lisp
arrays, and it has not all been fixed up.  My impression is, anyway,
that the intent is not to benchmark in maclisp-compatibility mode,
but rather the functionally equivalent code as it would be written in
the implementation.

In those cases where there are multiple times specified (most of them),
presumably the one you want is the fastest, as none of them have had
gross kludging applied to them.  Those with error-checking and overly-
generic functions are presented for comparison.  If nothing else,
they are reasonable for us as a comparison.  All of the code
modifications are noted here (this is essentially a file i kept such
notes in).
!
----------------------------------------------------------------
;;;	Friday  July 22,1983  2:16  FQ+4D.16H.41M.10S.  -*- Text -*-
BOYER

Pass 1.  Changes to preserve semantics:
(member x y) => (member x y :test #'equal)

try 1:
cpu=116.5, elapsed=119.9, pagefaults=3907

Try 2:
cpu=115.76, elapsed=120.70, pagefaults=3961

Note...   I ran this part several times while experimenting with
getting VMS to let me have the machine to myself (and not play musical
pages with me).  As i have noticed before, the times vary;  in this
case, from as little as 114 seconds (surprisingly, on one which paged
more), to the 120 here (again surprisingly, which was set to page less).
----------------
Pass 2.  Change, in TRANS-OF-IMPLIES1, (EQUAL N 1) -> (EQL N 1).
	 Turn ON open-compilation of CAR, CDR, RPLACA, etc.
Try 1:
cpu=81.33,elapsed=83.78,pagefaults=3965
Try 2:
cpu=81.69,elapsed=85.11,pagefaults=3993
----
Comments.  Heavy on function calls;  this will show in NIL.
Probably heavy on use of EQUAL.  The NIL EQUAL function leaves
much to be desired in efficiency (partly from function calls),
especially in the simple cases.  EQUAL is probably going to be
written in macro-32 soon, to handle the simple cases.
----------------------------------------------------------------
!
BROWSE

Pass 1:  changes to preserve semantics:
	(subst nil nil ...) -> (copy-tree ...)
	Change arithmetic functions to NIL's fixnum-only versions.
	Bleagh!  This bashes my nifty-keen random number generator!
	Well, i don't think anything needs it.

Try 1:
cpu=1099.84,elapsed=1226.54,pagefaults=9664

The above time includes about a minute or two of poking around with
the debugger to see if the lisp had broken.  I will not try a second
time to factor that out.

My face is red.  I think what is going on here is that INTERN is a big
loser.  For various brain-damaged reasons, INTERN is not what is
called by either the compiled-code loader, or the reader.  Because it
is so brain-damaged but not really used (except in cases like this!),
its fixing has been put well towards the end of the queue.  (The code
will be all-new anyway when common-lisp packages are put in.)

Because of this i'm not going to bother bumming anything (like
carcdr-switch, or even optimizing GETCHAR usage in any way).
----------------------------------------------------------------
!
CTAK

Changed to fixnum-only arithmetic functions.  [I will give you some
generic-arithmetic TAK times with ordinary TAK, for kicks.]

cpu=9.92,elapsed=9.92,pagefaults=0
----------------------------------------------------------------
!
DDERIV

Fix fixnum-only functions (probably negligible in this case).
(Fortuitously, NIL compilation of (mapcar 'foo ...) works the same as
(mapcar #'foo ...), even though technically it should not.)

Try 1:
cpu=34.06,elapsed=35.35,pagefaults=4733
Try 2:
cpu=34.1,elapsed=35.38,pagefaults=4698

Totally inline-coded carcdr (etc.).  (Note the current mapcar uses
the "normal", i.e. error-checking, car/cdr/rplaca/rplacd.  I have an
uninstalled "fix" for this somewhere or other.)

cpu=27.36,elapsed=29.08,pagefaults=4697
cpu=27.38,elapsed=28.81,pagefaults=4709
cpu=26.6,elapsed=29.0,pagefaults=4733
----------------------------------------------------------------
!
DERIV

cpu=31.15,elapsed=32.83,pagefaults=4428
cpu=31.14,elapsed=33.26,pagefaults=4461
cpu=31.31,elapsed=33.42,pagefaults=4469

Inline carcdr:

cpu=22.91,elapsed=24.44,pagefaults=4442
cpu=22.69,elapsed=24.63,pagefaults=4468
cpu=22.93,elapsed=25.37,pagefaults=4478
----------------------------------------------------------------
!
DESTRU

Fixnum arithmetic, as always.
Curio:  NCONC uses LAST which does error and circularity checking.

cpu=14.65,elapsed=15.14,pagefaults=749
cpu=14.5,elapsed=14.68,pagefaults=724
cpu=14.6,elapsed=14.74,pagefaults=722

Inline carcdr:
cpu=9.09,elapsed=10.01,pagefaults=738
cpu=8.95,elapsed=8.96,pagefaults=734
cpu=9.02,elapsed=9.15,pagefaults=712

Another curio:  Use of SETF instead of RPLACA/D would result in
infinitesimally better code because of the different return value.
Not bothering to try this.
----------------------------------------------------------------
!
DIV2

Fixnum arithmetic changes.

Iterative test:
cpu=17.06,elapsed=17.96,pagefaults=2024
cpu=16.92,elapsed=17.42,pagefaults=1994
Recursive test:
cpu=22.57,elapsed=23.13,pagefaults=2053
cpu=22.57,elapsed=23.03,pagefaults=2060

Inline carcdr:
Iterative test:
cpu=9.53,elapsed=10.32,pagefaults=2044,
cpu=9.44,elapsed=9.94,pagefaults=2062
Recursive test:
cpu=14.82,elapsed=15.52,pagefaults=2016
cpu=14.7,elapsed=15.15,pagefaults=2033
----------------------------------------------------------------
!
FDDERIV

Fixnum arithmetic.
We don't support that format of defun.  However, we have a sort
of subrcall, in that i can bind a lexical function to a
compiled-function, and the call to it will be "direct", the same as if
it had been an ordinary function call.  That is, assuming that the
DERIV property of something is a compiled function, i can change
(DEFUN DERIV (A)
   (COND ((ATOM A) (COND ((EQ A 'X) 1) (T 0)))
	 (T (LET ((DERIV (GET (CAR A) 'DERIV)))
	      (COND (DERIV (SUBRCALL T DERIV (CDR A)))
		    (T 'ERROR))))))
to be
(DEFUN DERIV (A)
   (COND ((ATOM A) (COND ((EQ A 'X) 1) (T 0)))
	 (T (LET ((DERIV (GET (CAR A) 'DERIV)))
	      (COND (DERIV ((lambda ((&function f)) (f (cdr a))) deriv))
		    (T 'ERROR))))))
This is sort of the primitive function-cell binding from which FLET
is built.

cpu=33.22,elapsed=34.93,pagefaults=4721
cpu=33.03,elapsed=34.33,pagefaults=4677

Inline carcdr:
cpu=26.52,elapsed=28.41,pagefaults=4748
cpu=26.45,elapsed=28.29,pagefaults=4721
----------------------------------------------------------------
!
FFT

Removed binding/init of PI.  PI is a DEFCONSTANT constant in NIL.

I hate this one.  I've played with it before.  I will give two
different ways of doing it.

Version A:  using an array of :element-type double-float.  This is a
joke in the current nil, because every access will have to cons
because there is no way to declare such an access.  The access is done
with AREF (= old nil VREF when one dimensional).  I'm including this
for joke value, sort of.  Probably most of the time is spent flonum-
consing and paging in that memory.
cpu=221.21,elapsed=220.86,pagefaults=17134

Version B:  use a simple [general] vector which happens to contain
flonums, and access it with SGVREF (remember that?).
cpu=35.59,elapsed=38.17,pagefaults=5674

Version C (if i get the energy) will use just an untyped 1-dimensional
array and AREF.  The vector will of course be a simple general vector,
but it will be using generic vector referencing.

In all three cases, the following happens.  Nested open-compiled
flonum functions do not cons intermediate results.  However, computed
flonums put anywhere, including passing them as arguments to all but a
select few functions (of which i think COS and SIN are examples), will
cons them.  Saving as local variables conses also.
Note also we are using double-floats here (all we support
at the moment):  64 bit vax d←floats.

There is one additional bum i am NOT using, because i do not have
"nice" semantics to use it.  That is, FLOAT of a fixnum can be
open-compiled (we have an IFLOAT primitive, but it's supposedly not
"public").  This would turn into a single vax instruction, and also
eliminate a couple flonum conses as described above.
----------------------------------------------------------------
!
FPRINT

Fixnum arithmetic.

Hmmm.  I think i may have just found a bug in the charpos-manipulator
mixin for output streams which causes it, when the charpos exceeds
the line length (default 80), to go searching hopelessly for a
line-wrap method.  At the same time not resetting the charpos (after
all, the line did not wrap), so doing this on every character after
number 79.

I should add there is a sort of bug in benchmarking i/o in this manner
anyway for systems which do record i/o, line-oriented.  If you give
super-long lines then it has to keep expanding the buffer, so you are
measuring that, rather than the i/o as it would "normally" occur.

In NIL this does not occur, because I have not upgraded the primitive
disk i/o support above the early primitives written in bliss.  They
don't handle records longer than 512 bytes, so cheat by just writing
them out, and on reading, cheat by saying that if the record is
exactly 512 bytes long, then we don't insert the implicit newline at
the end of it.  So for NIL at least read/print/read  holds, i hope, as
i go to try FREAD...

So for whatever it is worth, the time for this was
cpu=131.93,elapsed=133.11,pagefaults=167
but i would not quote it.

Fixing the bug (it is a patch in the current distribution), we get:
cpu=37.92,elapsed=38.97,pagefaults=144
cpu=37.65,elapsed=38.23,pagefaults=4

NIL file i/o is yet another thing which is going to be replaced
totally.  I believe i will be eliminating one Lisp function call and
maybe one other VAX procedure call per character output.
----------------------------------------------------------------
!
FREAD

cpu=28.11,elapsed=29.55,pagefaults=190
cpu=27.78,elapsed=28.02,pagefaults=129
----------------------------------------------------------------
!
FRPOLY

Fixnum arithmetic.
(defmacro pzerop (x)
  ;`(signp e ,x)
  `(let ((%foo% ,x)) (and (numberp %foo%) (zerop %foo%)))
  )
Hmmm.  There is no PDIFFER1 function?

First pass.
(bench 2)
    cpu=0.05,elapsed=0.05,pf=1
    cpu=0.51,elapsed=1.62,pf=64
    cpu=0.06,elapsed=0.06,pf=2
(bench 5)
    cpu=0.5,elapsed=0.51,pf=16
    cpu=2.28,elapsed=2.28,pf=31
    cpu=0.55,elapsed=0.55,pf=21
(bench 10)
    cpu=5.97,elapsed=5.97,pf=150
    cpu=40.49,elapsed=40.73,pf=591
    cpu=6.8,elapsed=6.8,pf=21
(bench 15)
    cpu=40.47,elapsed=41.01,pf=862
    cpu=492.7,elapsed=499.78,pf=5668
    cpu=44.28,elapsed=45.34,pf=1566

Inline carcdr:
(bench 2)
    cpu=0.04,elapsed=0.04,pf=2
    cpu=0.15,elapsed=0.15,pf=5
    cpu=0.05,elapsed=0.05,pf=3
(bench 5)
    cpu=0.36,elapsed=0.36,pf=15
    cpu=2.15,elapsed=2.15,pf=39
    cpu=0.42,elapsed=0.43,pf=22
(bench 10)
    cpu=3.85,elapsed=3.88,pf=158
    cpu=38.71,elapsed=38.99,pf=574
    cpu=4.64,elapsed=4.66,pf=284
(bench 15)
    cpu=24.93,elapsed=25.81,pf=833
    cpu=479.48,elapsed=482.87,pf=5572
    cpu=30.7,elapsed=31.18,pf=1556
----------------------------------------------------------------
!
PUZZLE

Fixnum arithmetic, as always.

Make the following macro definition
(defmacro deftable (name type &rest dimensions)
  (let ((tabvar (symbolconc '* name '-table*)))
    `(progn 'compile
       (defparameter ,tabvar
	 (make-array (list ,@dimensions))
       ,.(cond ((eq type 'fixnum) `((fill ,tabvar 0)))
	       ((eq type 'flonum) `((fill ,tabvar 0.0))))
       (defmacro ,name (&rest dimensions)
	 ,(cond ((= (length dimensions) 1) ``(sgvref ,',tabvar ,@dimensions))
		(t ``(aref ,',tabvar ,@dimensions)))))))
and change calls to ARRAY to DEFTABLE, calls to STORE to SETF.
(Hopefully not causing order-of-evaluation problems.)  To really bum
this i could use SGVREF for the one-dimensional cases.

cpu=497.85,elapsed=498.02,pagefaults=4

Presumably this is because 2-dim aref is losing its lunch.  I think that
that overshadows the generic reference to the one-dimensional case, so i'm
not going to bother doing that case.  (The multi-dimensional generic aref
minisubr has been mostly written, but got bumped down the queue some time 
ago so never got debugged nor installed.)  A new compiler and some array
declarations would work wonders.
----------------------------------------------------------------
!
STAK

Fixnum-only arithmetic.
cpu=23.15,elapsed=23.24,pf=0

I've been going through the nil binding and value-cell code recently,
and have found one of the typical brainos of large systems.  The
binding stuff seems to think that it is doing the world a favor by
saving and restoring all registers it uses.  I don't think there is a
single piece of code which accepts this favor.  (More typically, kernel
subroutines just "document" the registers they save, but i have found
this to be more of a pain than it is worth if i have to go and fix
something.  I have reason to believe that this has hurt maclisp at
points, as i think that various functions/subroutines have to push/
pop registers which earlier versions of them did not use.)
----------------------------------------------------------------
!
TAK

Generic arithmetic (!):
cpu=8.04,elapsed=8.05,pf=0

Number-consing version should be the same:
cpu=8.02,elapsed=8.03,pf=0

Fixnum-only arithmetic (presumably the "real" one):
cpu=4.16,elapsed=4.16,pf=0
----------------------------------------------------------------
!
TAKL

cpu=61.12,elapsed=61.3,pf=0

Inline-carcdr:
cpu=39.13,39.27,pf=0
----------------------------------------------------------------
!
TAKR
cpu=5.71,elapsed=5.71,pf=0
[Why?  I don't know why.  I don't know anything about the hardware
here.  I did it a couple times to make sure.]
----------------------------------------------------------------
!
TRIANGLE

Fixnum arithmetic.  Named arrays turned into vectors (1-d arrays),
in special variables; e.g.,
(defparameter *b*
  (make-array 37))
and all the references turned to AREF, STORE to SETF.  FILLARRAY in setups
turned to REPLACE (there was one fewer element in the list than in the
array!).  (cdr (listarray 'sequence)) turns into
(replace (make-list (1-& sequence-length)) *sequence* :start2 1), and
sequence-length is defined with *sequence*, by
(defconstant sequence-length 14).
cpu=1791.88,elapsed=1797.71,pf=238

Try 2:  change AREF to SGVREF.
cpu=649.73,elapsed=652.35,pf=230
----------------------------------------------------------------
!
TRAVERSE

Init:
cpu=85.81,elapsed=85.83,pagefaults=645
Traverse:
cpu=626.17,elapsed=626.26,pagefaults=1 (yow!)

Perturb the defstruct so that the structure is typed (as will be the
default for common-lisp).
Init:
cpu=86.7,elapsed=87.38,pagefaults=650
Traverse:
cpu=669.78,elapsed=669.88,pagefaults=5
[Hmmm.  I didn't think it would be any slower than the previous one
on traverse...  I should look at the xref code.  Creation of such a
structure should not be significantly slower than creation of just a
simple vector, however the structure has one more pointer-slot than
the vector does.]

Inline carcdr.  Make the above typed structure reference inline-coded
without error checking.  (The above did not do type checking on the
structure, only that it is an "extend" and that the index is within
bounds;  essentially the same as doing a generic vector reference on a
simple vector.)

Init:
cpu=38.93,elapsed=39.72,pagefaults=664
Traverse:
cpu=273.26,elapsed=273.32,pagefaults=4

∂05-Aug-83  2245	GSB@MIT-ML 	NIL Benchmarks, continued   
Received: from MIT-ML by SU-AI with TCP/SMTP; 5 Aug 83  22:45:41 PDT
Date: 6 August 1983 01:46 EDT
From: Glenn S. Burke <GSB @ MIT-ML>
Subject: NIL Benchmarks, continued
To: rpg @ SU-AI

I found i left out TPRINT.  Here it is:

TPRINT

Fixnum arithmetic.
cpu=47.55,elapsed=58.31,pf=0

For whatever it's worth, NIL terminal i/o models ITS, and NIL performs
all of the cursor maintenance.  For simplicity, NIL doesn't do
block-mode output except when it knows it can.  So, when PRIN1
discovers that the print-name of a symbol can be output as-is, it
passes it on to the stream, which (in these cases) will discover that
there are no funny characters, which will cause the symbol print-names
to be output in block-mode.  (So, for number-of-characters output, it
would be faster with larger print-names.)  Various other (atomic)
things internally buffer and do string-outs, such as number printing.

For kicks, to isolate the PRIN1-type overhead from the terminal-io
overhead, doing the same thing with PRINC gives
cpu=34.4,elapsed=45.68,pf=0

There is also the fact that printing "to the default" prints to
standard-output, which is a synonym stream to terminal-io, so there is
message-forwarding for each individual message being sent.  It is a
feature rather than a bug that it effectively recomputes the output
destination for every i/o operation, so standard-output could be
changed at interrupt level, for instance.  Doing
(print test-pattern terminal-io) gives
cpu=36.66,elapsed=47.19,pf=15

And to just hack the i/o, doing a PRINC of a string containing the
characters output, that is,
(defvar test-string
  (with-output-to-string (s)
    (princ test-pattern s)))
(timer string-timit (princ test-string)),
gives:
cpu=4.13,elapsed=14.09,pf=0
What happens here is that princing a string just does one stream
operation (:oustr, i.e. zapping out the string), so everything then is
in the cursor and tty-io code.  Although it is doing block-mode
output, it is not going and outputting the entire string in one block,
because it has to go and do cursor-stuff (line wrap, continuation
columns, etc.)
----------------------------------------------------------------

∂09-Oct-83  0922	jkf@ucbkim 	results 
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  09:22:33 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA17573; Sun, 9 Oct 83 09:26:05 PDT
Date: Sun, 9 Oct 83 09:26:05 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091626.AA17573@ucbkim.ARPA>
To: rpg@su-ai
Subject: results
Cc: 

  the form of the results will be a script showing the computation of each
of the benchmarks on a given machine.  Each benchmark was run 6 times,

		   compiled   compiled-with-local-fcns  interpreted
 translinks on	      1			3		   5
 translinks off	      2			4		   6

Each benchmark was run on three machines, a 780, a 750 and a sun 68k.
The machine the benchmark was run on will be named in the script.
There is no indication of the type of machine.  Here is a key:
  ucbkim 780
  ucbmatisse 750
  ucbmike sun 68k

Another machine you may see mentioned is lbl-csam, a 780.



∂09-Oct-83  0937	jkf@ucbkim 	boyer   
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  09:37:37 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA17661; Sun, 9 Oct 83 09:41:06 PDT
Date: Sun, 9 Oct 83 09:41:06 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091641.AA17661@ucbkim.ARPA>
To: rpg@su-ai
Subject: boyer
Cc: 

--- Benchmark boyer run on ucbkim at Sat Oct 8 02:45:06 PDT 1983 by jkf
--- cpu usage: 2:45am up 3:15, 0 users, load average: 1.12, 1.13, 1.15
Franz Lisp, Opus 38.81

=> [fasl boyer.o]
t
=> benchmark: test (file boyer) , tranlinks: on, localf: no
executing form: (test)
begin (217 213)
end (4507 2093)
runs 1
avg cpu time 40.16666666666667, avg gc time 31.33333333333333

benchmark: test (file boyer) , tranlinks: off, localf: no
executing form: (test)
begin (4507 2093)
end (14485 3372)
runs 1
avg cpu time 144.9833333333333, avg gc time 21.31666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl boyer-l.o]
t
=> benchmark: test (file boyer) , tranlinks: on, localf: yes
executing form: (test)
begin (208 211)
end (3308 2076)
runs 1
avg cpu time 20.58333333333333, avg gc time 31.08333333333333

benchmark: test (file boyer) , tranlinks: off, localf: yes
executing form: (test)
begin (3310 2076)
end (5851 3357)
runs 1
avg cpu time 21.0, avg gc time 21.35

nil
=> Franz Lisp, Opus 38.81

=> [load boyer.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
=> benchmark: test (file boyer) , tranlinks: on, interpreted 
executing form: (test)
begin (304 247)
end (63663 2112)
runs 1
avg cpu time 1024.9, avg gc time 31.08333333333333

benchmark: test (file boyer) , tranlinks: off, interpreted 
executing form: (test)
begin (63665 2112)
end (126963 3672)
runs 1
avg cpu time 1028.966666666667, avg gc time 26.0

nil
=> --- cpu usage: 3:29am up 3:59, 0 users, load average: 1.31, 1.14, 1.12
--- end of benchmark boyer



--- Benchmark boyer run on ucbmatisse at Fri Oct 7 18:16:45 PDT 1983 by jkf
--- cpu usage: 6:16pm up 2:46, 1 users, load average: 1.21, 0.50, 0.29
Franz Lisp, Opus 38.81

=> [fasl boyer.o]
t
=> benchmark: test (file boyer) , tranlinks: on, localf: no
executing form: (test)
begin (400 273)
end (7087 3327)
runs 1
avg cpu time 60.55, avg gc time 50.9

benchmark: test (file boyer) , tranlinks: off, localf: no
executing form: (test)
begin (7089 3327)
end (22613 5409)
runs 1
avg cpu time 224.0333333333333, avg gc time 34.7

nil
=> Franz Lisp, Opus 38.81

=> [fasl boyer-l.o]
t
=> benchmark: test (file boyer) , tranlinks: on, localf: yes
executing form: (test)
begin (385 268)
end (5505 3327)
runs 1
avg cpu time 34.35, avg gc time 50.98333333333333

benchmark: test (file boyer) , tranlinks: off, localf: yes
executing form: (test)
begin (5507 3327)
end (9689 5429)
runs 1
avg cpu time 34.66666666666667, avg gc time 35.03333333333333

nil
=> Franz Lisp, Opus 38.81

=> [load boyer.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
=> benchmark: test (file boyer) , tranlinks: on, interpreted 
executing form: (test)
begin (486 273)
end (99686 3314)
runs 1
avg cpu time 1602.65, avg gc time 50.68333333333333

benchmark: test (file boyer) , tranlinks: off, interpreted 
executing form: (test)
begin (99691 3314)
end (197940 5422)
runs 1
avg cpu time 1602.35, avg gc time 35.13333333333333

nil
=> --- cpu usage: 7:24pm up 3:54, 0 users, load average: 1.00, 1.03, 1.04
--- end of benchmark boyer



--- Benchmark boyer run on ucbmike at Fri Oct 7 14:40:16 PDT 1983 by jkf
--- cpu usage: 2:40pm up 5:12, 0 users, load average: 1.14, 1.05, 1.04
Franz Lisp, Opus 38.79
-> [fasl boyer.o]
t
-> benchmark: test (file boyer) , tranlinks: on, localf: no
executing form: (test)
begin (469 450)
end (8861 4979)
runs 1
avg cpu time 64.38333333333334, avg gc time 75.48333333333333

benchmark: test (file boyer) , tranlinks: off, localf: no
executing form: (test)
begin (8866 4979)
end (26752 8086)
runs 1
avg cpu time 246.3166666666667, avg gc time 51.78333333333333

nil
-> Franz Lisp, Opus 38.79
-> [fasl boyer-l.o]
t
-> benchmark: test (file boyer) , tranlinks: on, localf: yes
executing form: (test)
begin (458 441)
end (7517 4963)
runs 1
avg cpu time 42.28333333333333, avg gc time 75.36666666666666

benchmark: test (file boyer) , tranlinks: off, localf: yes
executing form: (test)
begin (7522 4963)
end (13188 8058)
runs 1
avg cpu time 42.85, avg gc time 51.58333333333334

nil
-> Franz Lisp, Opus 38.79
-> [load boyer.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
-> benchmark: test (file boyer) , tranlinks: on, interpreted 
executing form: (test)
begin (651 510)
end (118177 5034)
runs 1
avg cpu time 1883.366666666667, avg gc time 75.40000000000001

benchmark: test (file boyer) , tranlinks: off, interpreted 
executing form: (test)
begin (118185 5034)
end (234299 8116)
runs 1
avg cpu time 1883.866666666667, avg gc time 51.36666666666667

nil
-> --- cpu usage: 4:04pm up 6:35, 0 users, load average: 1.10, 1.06, 1.03
--- end of benchmark boyer


∂09-Oct-83  0939	jkf@ucbkim 	ctak    
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  09:39:37 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA17672; Sun, 9 Oct 83 09:43:04 PDT
Date: Sun, 9 Oct 83 09:43:04 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091643.AA17672@ucbkim.ARPA>
To: rpg@su-ai
Subject: ctak
Cc: 

--- Benchmark ctak run on ucbkim at Sat Oct 8 02:40:04 PDT 1983 by jkf
--- cpu usage: 2:40am up 3:10, 0 users, load average: 1.35, 1.19, 1.17
Franz Lisp, Opus 38.81

=> [fasl ctak.o]
t
=> benchmark: test (file ctak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (44 175)
end (767 175)
runs 1
avg cpu time 12.05

benchmark: test (file ctak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (767 175)
end (1835 175)
runs 1
avg cpu time 17.8

nil
=> Franz Lisp, Opus 38.81

=> [fasl ctak-l.o]
t
=> benchmark: test (file ctak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (45 176)
end (686 176)
runs 1
avg cpu time 10.68333333333333

benchmark: test (file ctak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (686 176)
end (1328 176)
runs 1
avg cpu time 10.7

nil
=> Franz Lisp, Opus 38.81

=> [load ctak.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
=> benchmark: test (file ctak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (68 174)
end (6531 174)
runs 1
avg cpu time 107.7166666666667

benchmark: test (file ctak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (6533 174)
end (13027 174)
runs 1
avg cpu time 108.2333333333333

nil
=> --- cpu usage: 2:45am up 3:15, 0 users, load average: 1.13, 1.14, 1.15
--- end of benchmark ctak


--- Benchmark ctak run on ucbmatisse at Fri Oct 7 13:07:38 PDT 1983 by jkf
--- cpu usage: 1:07pm up 4 days, 53 mins, 4 users, load average: 1.44, 1.24, 1.26
Franz Lisp, Opus 38.81

=> [fasl ctak.o]
t
=> benchmark: test (file ctak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (122 198)
end (1223 198)
runs 1
avg cpu time 18.35

benchmark: test (file ctak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (1225 198)
end (2848 198)
runs 1
avg cpu time 27.05

nil
=> Franz Lisp, Opus 38.81

=> [fasl ctak-l.o]
t
=> benchmark: test (file ctak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (121 202)
end (1246 202)
runs 1
avg cpu time 18.75

benchmark: test (file ctak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (1247 202)
end (2342 202)
runs 1
avg cpu time 18.25

nil
=> Franz Lisp, Opus 38.81

=> [load ctak.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
=> benchmark: test (file ctak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (163 200)
end (10258 200)
runs 1
avg cpu time 168.25

benchmark: test (file ctak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (10260 200)
end (20409 200)
runs 1
avg cpu time 169.15

nil
=> --- cpu usage: 1:16pm up 4 days, 1:02, 5 users, load average: 1.53, 1.32, 1.28
--- end of benchmark ctak


--- Benchmark ctak run on ucbmike at Fri Oct 7 14:30:45 PDT 1983 by jkf
--- cpu usage: 2:30pm up 5:02, 0 users, load average: 1.17, 1.06, 1.04
Franz Lisp, Opus 38.79
-> [fasl ctak.o]
t
-> benchmark: test (file ctak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (89 324)
end (1334 324)
runs 1
avg cpu time 20.75

benchmark: test (file ctak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (1336 324)
end (3044 324)
runs 1
avg cpu time 28.46666666666667

nil
-> Franz Lisp, Opus 38.79
-> [fasl ctak-l.o]
t
-> benchmark: test (file ctak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (89 324)
end (1265 324)
runs 1
avg cpu time 19.6

benchmark: test (file ctak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (1267 324)
end (2444 324)
runs 1
avg cpu time 19.61666666666667

nil
-> Franz Lisp, Opus 38.79
-> [load ctak.l]
[fasl benchmac.o]
[fasl benchmac.o]
t
-> benchmark: test (file ctak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (142 325)
end (11786 325)
runs 1
avg cpu time 194.0666666666667

benchmark: test (file ctak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (11792 325)
end (23437 325)
runs 1
avg cpu time 194.0833333333333

nil
-> --- cpu usage: 2:40pm up 5:11, 0 users, load average: 1.07, 1.04, 1.04
--- end of benchmark ctak


∂09-Oct-83  0946	jkf@ucbkim 	browse  
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  09:45:54 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA17720; Sun, 9 Oct 83 09:49:22 PDT
Date: Sun, 9 Oct 83 09:49:22 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091649.AA17720@ucbkim.ARPA>
To: rpg@su-ai
Subject: browse
Cc: 

--- Benchmark browse run on ucbkim at Sat Oct 8 01:30:11 PDT 1983 by jkf
--- cpu usage: 1:30am up 2 hrs, 4 users, load average: 1.25, 1.21, 1.28
Franz Lisp, Opus 38.81

=> [fasl browse.o]
t
=> benchmark: browse (file browse) , tranlinks: on, localf: no
executing form: (browse)
begin (14 146)
end (10229 5666)
runs 1
avg cpu time 78.25, avg gc time 92.0

benchmark: browse (file browse) , tranlinks: off, localf: no
executing form: (browse)
begin (10231 5666)
end (24233 10336)
runs 1
avg cpu time 155.5333333333333, avg gc time 77.83333333333333

nil
=> Franz Lisp, Opus 38.81

=> [fasl browse-l.o]
t
=> benchmark: browse (file browse) , tranlinks: on, localf: yes
executing form: (browse)
begin (20 146)
end (9739 5484)
runs 1
avg cpu time 73.01666666666667, avg gc time 88.96666666666667

benchmark: browse (file browse) , tranlinks: off, localf: yes
executing form: (browse)
begin (9740 5484)
end (22520 10122)
runs 1
avg cpu time 135.7, avg gc time 77.3

nil
=> Franz Lisp, Opus 38.81

=> [load browse.l]
[fasl benchmac.o]
t
=> benchmark: browse (file browse) , tranlinks: on, interpreted 
executing form: (browse)
begin (49 146)
end (80821 5494)
runs 1
avg cpu time 1257.066666666667, avg gc time 89.13333333333333

benchmark: browse (file browse) , tranlinks: off, interpreted 
executing form: (browse)
begin (80824 5494)
end (160988 10343)
runs 1
avg cpu time 1255.25, avg gc time 80.81666666666667

nil
=> --- cpu usage: 2:36am up 3:06, 0 users, load average: 1.24, 1.22, 1.17
--- end of benchmark browse


--- Benchmark browse run on ucbmatisse at Fri Oct 7 11:11:42 PDT 1983 by jkf
--- cpu usage: 11:11am up 3 days, 22:58, 3 users, load average: 1.05, 1.15, 1.15
Franz Lisp, Opus 38.81

=> [fasl browse.o]
t
=> benchmark: browse (file browse) , tranlinks: on, localf: no
executing form: (browse)
begin (74 149)
end (15742 8457)
runs 1
avg cpu time 122.6666666666667, avg gc time 138.4666666666667

benchmark: browse (file browse) , tranlinks: off, localf: no
executing form: (browse)
begin (15745 8457)
end (39106 16552)
runs 1
avg cpu time 254.4333333333333, avg gc time 134.9166666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl browse-l.o]
t
=> benchmark: browse (file browse) , tranlinks: on, localf: yes
executing form: (browse)
begin (74 151)
end (15382 8427)
runs 1
avg cpu time 117.2, avg gc time 137.9333333333333

benchmark: browse (file browse) , tranlinks: off, localf: yes
executing form: (browse)
begin (15384 8427)
end (36085 16379)
runs 1
avg cpu time 212.4833333333333, avg gc time 132.5333333333333

nil
=> Franz Lisp, Opus 38.81

=> [load browse.l]
[fasl benchmac.o]
t
=> benchmark: browse (file browse) , tranlinks: on, interpreted 
executing form: (browse)
begin (121 154)
end (129293 8550)
runs 1
avg cpu time 2012.933333333333, avg gc time 139.9333333333333

benchmark: browse (file browse) , tranlinks: off, interpreted 
executing form: (browse)
begin (129367 8620)
end (258983 16520)
runs 1
avg cpu time 2028.6, avg gc time 131.6666666666667

nil
=> --- cpu usage: 1:00pm up 4 days, 47 mins, 4 users, load average: 1.00, 1.13, 1.28
--- end of benchmark browse

--- Benchmark browse run on ucbmike at Fri Oct 7 12:19:34 PDT 1983 by jkf
--- cpu usage: 12:19pm up 2:51, 0 users, load average: 1.01, 1.03, 1.04
Franz Lisp, Opus 38.79
-> [fasl browse.o]
t
-> benchmark: browse (file browse) , tranlinks: on, localf: no
executing form: (browse)
begin (28 259)
end (24810 16798)
runs 1
avg cpu time 137.3833333333333, avg gc time 275.65

benchmark: browse (file browse) , tranlinks: off, localf: no
executing form: (browse)
begin (24815 16798)
end (53430 30307)
runs 1
avg cpu time 251.7666666666667, avg gc time 225.15

nil
-> Franz Lisp, Opus 38.79
-> [fasl browse-l.o]
t
-> benchmark: browse (file browse) , tranlinks: on, localf: yes
executing form: (browse)
begin (29 259)
end (24629 16790)
runs 1
avg cpu time 134.4833333333333, avg gc time 275.5166666666667

benchmark: browse (file browse) , tranlinks: off, localf: yes
executing form: (browse)
begin (24634 16790)
end (51791 30276)
runs 1
avg cpu time 227.85, avg gc time 224.7666666666667

nil
-> Franz Lisp, Opus 38.79
-> [load browse.l]
[fasl benchmac.o]
t
-> benchmark: browse (file browse) , tranlinks: on, interpreted 
executing form: (browse)
begin (144 325)
end (155181 16269)
runs 1
avg cpu time 2318.216666666667, avg gc time 265.7333333333333

benchmark: browse (file browse) , tranlinks: off, interpreted 
executing form: (browse)
begin (155188 16269)
end (307376 29686)
runs 1
avg cpu time 2312.85, avg gc time 223.6166666666667

nil
-> --- cpu usage: 2:22pm up 4:54, 0 users, load average: 1.01, 1.03, 1.03
--- end of benchmark browse


∂09-Oct-83  0957	jkf@ucbkim 	destru  
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  09:56:55 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA17828; Sun, 9 Oct 83 10:00:19 PDT
Date: Sun, 9 Oct 83 10:00:19 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091700.AA17828@ucbkim.ARPA>
To: rpg@su-ai
Subject: destru
Cc: 

--- Benchmark destru run on ucbkim at Sat Oct 8 01:22:39 PDT 1983 by jkf
--- cpu usage: 1:22am up 1:53, 4 users, load average: 1.32, 1.38, 1.37
Franz Lisp, Opus 38.81

=> [fasl destru.o]
t
=> benchmark: test (file destru) , tranlinks: on, localf: no
executing form: (destructive 600 50)
begin (49 176)
end (765 583)
runs 1
avg cpu time 5.15, avg gc time 6.783333333333333

benchmark: test (file destru) , tranlinks: off, localf: no
executing form: (destructive 600 50)
begin (766 583)
end (1589 989)
runs 1
avg cpu time 6.95, avg gc time 6.766666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl destru-l.o]
t
=> benchmark: test (file destru) , tranlinks: on, localf: yes
executing form: (destructive 600 50)
begin (43 173)
end (753 571)
runs 1
avg cpu time 5.2, avg gc time 6.633333333333333

benchmark: test (file destru) , tranlinks: off, localf: yes
executing form: (destructive 600 50)
begin (754 571)
end (1574 973)
runs 1
avg cpu time 6.966666666666667, avg gc time 6.7

nil
=> Franz Lisp, Opus 38.81

=> [load destru.l]
[fasl benchmac.o]
t
=> benchmark: test (file destru) , tranlinks: on, interpreted 
executing form: (destructive 600 50)
begin (66 177)
end (9984 607)
runs 1
avg cpu time 158.1333333333333, avg gc time 7.166666666666667

benchmark: test (file destru) , tranlinks: off, interpreted 
executing form: (destructive 600 50)
begin (9987 607)
end (19864 1006)
runs 1
avg cpu time 157.9666666666667, avg gc time 6.65

nil
=> --- cpu usage: 1:30am up 2 hrs, 4 users, load average: 1.28, 1.22, 1.29
--- end of benchmark destru


--- Benchmark destru run on ucbmatisse at Fri Oct 7 10:59:38 PDT 1983 by jkf
--- cpu usage: 10:59am up 3 days, 22:45, 2 users, load average: 1.00, 1.04, 1.09
Franz Lisp, Opus 38.81

=> [fasl destru.o]
t
=> benchmark: test (file destru) , tranlinks: on, localf: no
executing form: (destructive 600 50)
begin (118 197)
end (1056 614)
runs 1
avg cpu time 8.683333333333333, avg gc time 6.95

benchmark: test (file destru) , tranlinks: off, localf: no
executing form: (destructive 600 50)
begin (1059 614)
end (2179 1073)
runs 1
avg cpu time 11.01666666666667, avg gc time 7.65

nil
=> Franz Lisp, Opus 38.81

=> [fasl destru-l.o]
t
=> benchmark: test (file destru) , tranlinks: on, localf: yes
executing form: (destructive 600 50)
begin (118 195)
end (1060 609)
runs 1
avg cpu time 8.8, avg gc time 6.9

benchmark: test (file destru) , tranlinks: off, localf: yes
executing form: (destructive 600 50)
begin (1062 609)
end (2209 1072)
runs 1
avg cpu time 11.4, avg gc time 7.716666666666667

nil
=> Franz Lisp, Opus 38.81

=> [load destru.l]
[fasl benchmac.o]
t
=> benchmark: test (file destru) , tranlinks: on, interpreted 
executing form: (destructive 600 50)
begin (143 201)
end (16300 702)
runs 1
avg cpu time 260.9333333333333, avg gc time 8.35

benchmark: test (file destru) , tranlinks: off, interpreted 
executing form: (destructive 600 50)
begin (16304 702)
end (32406 1254)
runs 1
avg cpu time 259.1666666666667, avg gc time 9.2

nil
=> --- cpu usage: 11:11am up 3 days, 22:57, 3 users, load average: 1.05, 1.16, 1.15
--- end of benchmark destru

--- Benchmark destru run on ucbmike at Fri Oct 7 12:05:16 PDT 1983 by jkf
--- cpu usage: 12:05pm up 2:37, 0 users, load average: 1.08, 1.06, 1.06
Franz Lisp, Opus 38.79
-> [fasl destru.o]
t
-> benchmark: test (file destru) , tranlinks: on, localf: no
executing form: (destructive 600 50)
begin (89 322)
end (1469 863)
runs 1
avg cpu time 13.98333333333333, avg gc time 9.016666666666667

benchmark: test (file destru) , tranlinks: off, localf: no
executing form: (destructive 600 50)
begin (1473 863)
end (3164 1406)
runs 1
avg cpu time 19.13333333333333, avg gc time 9.050000000000001

nil
-> Franz Lisp, Opus 38.79
-> [fasl destru-l.o]
t
-> benchmark: test (file destru) , tranlinks: on, localf: yes
executing form: (destructive 600 50)
begin (88 324)
end (1471 866)
runs 1
avg cpu time 14.01666666666667, avg gc time 9.033333333333333

benchmark: test (file destru) , tranlinks: off, localf: yes
executing form: (destructive 600 50)
begin (1475 866)
end (3167 1409)
runs 1
avg cpu time 19.15, avg gc time 9.050000000000001

nil
-> Franz Lisp, Opus 38.79
-> [load destru.l]
[fasl benchmac.o]
t
-> benchmark: test (file destru) , tranlinks: on, interpreted 
executing form: (destructive 600 50)
begin (120 328)
end (19517 904)
runs 1
avg cpu time 313.6833333333333, avg gc time 9.6

benchmark: test (file destru) , tranlinks: off, interpreted 
executing form: (destructive 600 50)
begin (19525 904)
end (38990 1556)
runs 1
avg cpu time 313.55, avg gc time 10.86666666666667

nil
-> --- cpu usage: 12:19pm up 2:51, 0 users, load average: 1.01, 1.03, 1.04
--- end of benchmark destru


∂09-Oct-83  1121	jkf@ucbkim 	div2    
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  11:21:24 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA18807; Sun, 9 Oct 83 11:24:43 PDT
Date: Sun, 9 Oct 83 11:24:43 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091824.AA18807@ucbkim.ARPA>
To: rpg@su-ai
Subject: div2
Cc: 

--- Benchmark div2 run on lbl-csam at Sun Oct 9 10:48:37 PDT 1983 by jkf
--- cpu usage: 10:48am up 2 days, 2:41, 4 users, load average: 0.97, 0.90, 0.79
Franz Lisp, Opus 38.81
-> [fasl div2.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: no
executing form: (test1 l)
begin (38 173)
end (1236 1179)
runs 1
avg cpu time 3.2, avg gc time 16.76666666666667

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: no
executing form: (test1 l)
begin (1238 1179)
end (2450 2184)
runs 1
avg cpu time 3.45, avg gc time 16.75

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: no
executing form: (test2 l)
begin (2477 2210)
end (3956 3228)
runs 1
avg cpu time 7.683333333333333, avg gc time 16.96666666666667

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: no
executing form: (test2 l)
begin (3958 3228)
end (6721 4258)
runs 1
avg cpu time 28.88333333333333, avg gc time 17.16666666666667

nil
-> Franz Lisp, Opus 38.81
-> [fasl div2-l.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: yes
executing form: (test1 l)
begin (46 177)
end (1258 1188)
runs 1
avg cpu time 3.35, avg gc time 16.85

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: yes
executing form: (test1 l)
begin (1258 1188)
end (2460 2190)
runs 1
avg cpu time 3.333333333333333, avg gc time 16.7

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: yes
executing form: (test2 l)
begin (2489 2218)
end (3709 3225)
runs 1
avg cpu time 3.55, avg gc time 16.78333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: yes
executing form: (test2 l)
begin (3710 3225)
end (4923 4230)
runs 1
avg cpu time 3.466666666666667, avg gc time 16.75

nil
-> Franz Lisp, Opus 38.81
-> [load div2.l]
[fasl benchmac.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, interpreted 
executing form: (test1 l)
begin (81 174)
end (9136 1365)
runs 1
avg cpu time 131.0666666666667, avg gc time 19.85

benchmark: timit1 (file div2-timit1) , tranlinks: off, interpreted 
executing form: (test1 l)
begin (9139 1365)
end (18422 2608)
runs 1
avg cpu time 134.0, avg gc time 20.71666666666667

benchmark: timit2 (file div2-timit2) , tranlinks: on, interpreted 
executing form: (test2 l)
begin (18453 2636)
end (29338 3874)
runs 1
avg cpu time 160.7833333333333, avg gc time 20.63333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: off, interpreted 
executing form: (test2 l)
begin (29340 3874)
end (40272 5157)
runs 1
avg cpu time 160.8166666666667, avg gc time 21.38333333333333

nil
-> --- cpu usage: 11:06am up 2 days, 2:58, 2 users, load average: 1.16, 1.29, 1.28
--- end of benchmark div2


--- Benchmark div2 run on ucbmatisse at Sun Oct 9 10:44:37 PDT 1983 by jkf
--- cpu usage: 10:44am up 1 day, 19:13, 2 users, load average: 0.87, 0.99, 0.83
Franz Lisp, Opus 38.81

=> [fasl div2.o]
t
=> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: no
executing form: (test1 l)
begin (122 201)
end (1672 1429)
runs 1
avg cpu time 5.366666666666667, avg gc time 20.46666666666667

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: no
executing form: (test1 l)
begin (1673 1429)
end (3286 2706)
runs 1
avg cpu time 5.6, avg gc time 21.28333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: no
executing form: (test2 l)
begin (3337 2755)
end (5250 3990)
runs 1
avg cpu time 11.3, avg gc time 20.58333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: no
executing form: (test2 l)
begin (5251 3990)
end (8956 5269)
runs 1
avg cpu time 40.43333333333333, avg gc time 21.31666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl div2-l.o]
t
=> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: yes
executing form: (test1 l)
begin (121 198)
end (1664 1411)
runs 1
avg cpu time 5.5, avg gc time 20.21666666666667

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: yes
executing form: (test1 l)
begin (1666 1411)
end (3271 2682)
runs 1
avg cpu time 5.566666666666667, avg gc time 21.18333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: yes
executing form: (test2 l)
begin (3318 2727)
end (4895 3959)
runs 1
avg cpu time 5.75, avg gc time 20.53333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: yes
executing form: (test2 l)
begin (4896 3959)
end (6518 5247)
runs 1
avg cpu time 5.566666666666667, avg gc time 21.46666666666667

nil
=> Franz Lisp, Opus 38.81

=> [load div2.l]
[fasl benchmac.o]
t
=> benchmark: timit1 (file div2-timit1) , tranlinks: on, interpreted 
executing form: (test1 l)
begin (172 199)
end (15605 1590)
runs 1
avg cpu time 234.0333333333333, avg gc time 23.18333333333333

benchmark: timit1 (file div2-timit1) , tranlinks: off, interpreted 
executing form: (test1 l)
begin (15610 1590)
end (30856 3021)
runs 1
avg cpu time 230.25, avg gc time 23.85

benchmark: timit2 (file div2-timit2) , tranlinks: on, interpreted 
executing form: (test2 l)
begin (30913 3071)
end (48290 4523)
runs 1
avg cpu time 265.4166666666667, avg gc time 24.2

benchmark: timit2 (file div2-timit2) , tranlinks: off, interpreted 
executing form: (test2 l)
begin (48293 4523)
end (65642 5974)
runs 1
avg cpu time 264.9666666666667, avg gc time 24.18333333333333

nil
=> --- cpu usage: 11:10am up 1 day, 19:38, 3 users, load average: 1.33, 1.25, 1.13
--- end of benchmark div2


--- Benchmark div2 run on ucbmike at Sun Oct 9 10:23:10 PDT 1983 by jkf
--- cpu usage: 10:23am up 2 days, 55 mins, 2 users, load average: 0.95, 0.98, 0.92
Franz Lisp, Opus 38.79
-> [fasl div2.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: no
executing form: (test1 l)
begin (94 325)
end (2063 1817)
runs 1
avg cpu time 7.95, avg gc time 24.86666666666667

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: no
executing form: (test1 l)
begin (2066 1817)
end (4057 3313)
runs 1
avg cpu time 8.25, avg gc time 24.93333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: no
executing form: (test2 l)
begin (4129 3380)
end (6301 4884)
runs 1
avg cpu time 11.13333333333333, avg gc time 25.06666666666667

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: no
executing form: (test2 l)
begin (6305 4884)
end (10416 6397)
runs 1
avg cpu time 43.3, avg gc time 25.21666666666667

nil
-> Franz Lisp, Opus 38.79
-> [fasl div2-l.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, localf: yes
executing form: (test1 l)
begin (94 325)
end (2052 1813)
runs 1
avg cpu time 7.833333333333333, avg gc time 24.8

benchmark: timit1 (file div2-timit1) , tranlinks: off, localf: yes
executing form: (test1 l)
begin (2056 1813)
end (4023 3306)
runs 1
avg cpu time 7.9, avg gc time 24.88333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: on, localf: yes
executing form: (test2 l)
begin (4096 3373)
end (6035 4871)
runs 1
avg cpu time 7.35, avg gc time 24.96666666666667

benchmark: timit2 (file div2-timit2) , tranlinks: off, localf: yes
executing form: (test2 l)
begin (6040 4871)
end (7989 6384)
runs 1
avg cpu time 7.266666666666667, avg gc time 25.21666666666667

nil
-> Franz Lisp, Opus 38.79
-> [load div2.l]
[fasl benchmac.o]
t
-> benchmark: timit1 (file div2-timit1) , tranlinks: on, interpreted 
executing form: (test1 l)
begin (211 396)
end (18338 2063)
runs 1
avg cpu time 274.3333333333333, avg gc time 27.78333333333333

benchmark: timit1 (file div2-timit1) , tranlinks: off, interpreted 
executing form: (test1 l)
begin (18344 2063)
end (36559 3818)
runs 1
avg cpu time 274.3333333333333, avg gc time 29.25

benchmark: timit2 (file div2-timit2) , tranlinks: on, interpreted 
executing form: (test2 l)
begin (36638 3890)
end (57008 5626)
runs 1
avg cpu time 310.5666666666667, avg gc time 28.93333333333333

benchmark: timit2 (file div2-timit2) , tranlinks: off, interpreted 
executing form: (test2 l)
begin (57014 5626)
end (77452 7442)
runs 1
avg cpu time 310.3666666666667, avg gc time 30.26666666666667

nil
-> --- cpu usage: 10:52am up 2 days, 1:24, 2 users, load average: 1.01, 1.01, 1.00
--- end of benchmark div2


∂09-Oct-83  1122	jkf@ucbkim 	fprint  
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  11:22:05 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA18823; Sun, 9 Oct 83 11:25:33 PDT
Date: Sun, 9 Oct 83 11:25:33 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310091825.AA18823@ucbkim.ARPA>
To: rpg@su-ai
Subject: fprint
Cc: 

--- Benchmark fprint run on lbl-csam at Sun Oct 9 11:06:21 PDT 1983 by jkf
--- cpu usage: 11:06am up 2 days, 2:58, 2 users, load average: 1.15, 1.29, 1.28
Franz Lisp, Opus 38.81
-> [fasl fprint.o]
t
-> benchmark: test (file fprint) , tranlinks: on, localf: no
executing form: (fprint)
begin (68 179)
end (254 179)
runs 5
avg cpu time 0.62

benchmark: test (file fprint) , tranlinks: off, localf: no
executing form: (fprint)
begin (255 179)
end (445 179)
runs 5
avg cpu time 0.6333333333333333

nil
-> Franz Lisp, Opus 38.81
-> [fasl fprint-l.o]
t
-> benchmark: test (file fprint) , tranlinks: on, localf: yes
executing form: (fprint)
begin (57 182)
end (245 182)
runs 5
avg cpu time 0.6266666666666667

benchmark: test (file fprint) , tranlinks: off, localf: yes
executing form: (fprint)
begin (247 182)
end (439 182)
runs 5
avg cpu time 0.64

nil
-> Franz Lisp, Opus 38.81
-> [load fprint.l]
[fasl benchmac.o]
t
-> benchmark: test (file fprint) , tranlinks: on, interpreted 
executing form: (fprint)
begin (970 664)
end (1160 664)
runs 5
avg cpu time 0.6333333333333333

benchmark: test (file fprint) , tranlinks: off, interpreted 
executing form: (fprint)
begin (1163 664)
end (1359 664)
runs 5
avg cpu time 0.6533333333333333

nil
-> --- cpu usage: 11:07am up 2 days, 2:59, 2 users, load average: 1.15, 1.27, 1.27
--- end of benchmark fprint


--- Benchmark fprint run on ucbmatisse at Sun Oct 9 11:10:16 PDT 1983 by jkf
--- cpu usage: 11:10am up 1 day, 19:39, 3 users, load average: 1.47, 1.28, 1.14
Franz Lisp, Opus 38.81

=> [fasl fprint.o]
t
=> benchmark: test (file fprint) , tranlinks: on, localf: no
executing form: (fprint)
begin (155 208)
end (492 208)
runs 5
avg cpu time 1.123333333333333

benchmark: test (file fprint) , tranlinks: off, localf: no
executing form: (fprint)
begin (496 208)
end (817 208)
runs 5
avg cpu time 1.07

nil
=> Franz Lisp, Opus 38.81

=> [fasl fprint-l.o]
t
=> benchmark: test (file fprint) , tranlinks: on, localf: yes
executing form: (fprint)
begin (147 206)
end (480 206)
runs 5
avg cpu time 1.11

benchmark: test (file fprint) , tranlinks: off, localf: yes
executing form: (fprint)
begin (482 206)
end (813 206)
runs 5
avg cpu time 1.103333333333333

nil
=> Franz Lisp, Opus 38.81

=> [load fprint.l]
[fasl benchmac.o]
t
=> benchmark: test (file fprint) , tranlinks: on, interpreted 
executing form: (fprint)
begin (1522 874)
end (1853 874)
runs 5
avg cpu time 1.103333333333333

benchmark: test (file fprint) , tranlinks: off, interpreted 
executing form: (fprint)
begin (1856 874)
end (2182 874)
runs 5
avg cpu time 1.086666666666667

nil
=> --- cpu usage: 11:12am up 1 day, 19:40, 3 users, load average: 1.29, 1.28, 1.15
--- end of benchmark fprint


--- Benchmark fprint run on ucbmike at Sun Oct 9 10:52:39 PDT 1983 by jkf
--- cpu usage: 10:52am up 2 days, 1:24, 2 users, load average: 1.01, 1.01, 1.00
Franz Lisp, Opus 38.79
-> [fasl fprint.o]
t
-> benchmark: test (file fprint) , tranlinks: on, localf: no
executing form: (fprint)
begin (143 341)
end (571 341)
runs 5
avg cpu time 1.426666666666667

benchmark: test (file fprint) , tranlinks: off, localf: no
executing form: (fprint)
begin (573 341)
end (1000 341)
runs 5
avg cpu time 1.423333333333333

nil
-> Franz Lisp, Opus 38.79
-> [fasl fprint-l.o]
t
-> benchmark: test (file fprint) , tranlinks: on, localf: yes
executing form: (fprint)
begin (120 341)
end (547 341)
runs 5
avg cpu time 1.423333333333333

benchmark: test (file fprint) , tranlinks: off, localf: yes
executing form: (fprint)
begin (551 341)
end (979 341)
runs 5
avg cpu time 1.426666666666667

nil
-> Franz Lisp, Opus 38.79
-> [load fprint.l]
[fasl benchmac.o]
t
-> benchmark: test (file fprint) , tranlinks: on, interpreted 
executing form: (fprint)
begin (729 412)
end (1155 412)
runs 5
avg cpu time 1.42

benchmark: test (file fprint) , tranlinks: off, interpreted 
executing form: (fprint)
begin (1160 412)
end (1589 412)
runs 5
avg cpu time 1.43

nil
-> --- cpu usage: 10:54am up 2 days, 1:26, 2 users, load average: 1.08, 1.05, 1.02
--- end of benchmark fprint


∂09-Oct-83  1346	jkf@ucbkim 	fread   
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  13:46:38 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA20663; Sun, 9 Oct 83 13:49:58 PDT
Date: Sun, 9 Oct 83 13:49:58 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310092049.AA20663@ucbkim.ARPA>
To: rpg@su-ai
Subject: fread
Cc: 

--- Benchmark fread run on lbl-csam at Sun Oct 9 13:45:40 PDT 1983 by jkf
--- cpu usage: 1:45pm up 2 days, 5:38, 5 users, load average: 0.67, 0.40, 0.31
Franz Lisp, Opus 38.81
-> [fasl fread.o]
t
-> benchmark: test (file fread) , tranlinks: on, localf: no
executing form: (fread)
begin (44 175)
end (487 267)
runs 5
avg cpu time 1.17, avg gc time 0.3066666666666667

benchmark: test (file fread) , tranlinks: off, localf: no
executing form: (fread)
begin (488 267)
end (896 325)
runs 5
avg cpu time 1.166666666666667, avg gc time 0.1933333333333333

nil
-> Franz Lisp, Opus 38.81
-> [fasl fread-l.o]
t
-> benchmark: test (file fread) , tranlinks: on, localf: yes
executing form: (fread)
begin (44 175)
end (494 271)
runs 5
avg cpu time 1.18, avg gc time 0.32

benchmark: test (file fread) , tranlinks: off, localf: yes
executing form: (fread)
begin (496 271)
end (907 328)
runs 5
avg cpu time 1.18, avg gc time 0.19

nil
-> Franz Lisp, Opus 38.81
-> [load fread.l]
[fasl benchmac.o]
t
-> benchmark: test (file fread) , tranlinks: on, interpreted 
executing form: (fread)
begin (53 173)
end (551 302)
runs 5
avg cpu time 1.23, avg gc time 0.43

benchmark: test (file fread) , tranlinks: off, interpreted 
executing form: (fread)
begin (554 302)
end (984 362)
runs 5
avg cpu time 1.233333333333333, avg gc time 0.2

nil
-> --- cpu usage: 1:47pm up 2 days, 5:39, 5 users, load average: 1.86, 0.90, 0.51
--- end of benchmark fread


--- Benchmark fread run on ucbmatisse at Sun Oct 9 13:41:39 PDT 1983 by jkf
--- cpu usage: 1:41pm up 1 day, 22:10, 4 users, load average: 1.40, 1.06, 0.70
Franz Lisp, Opus 38.81

=> [fasl fread.o]
t
=> benchmark: test (file fread) , tranlinks: on, localf: no
executing form: (fread)
begin (122 205)
end (824 302)
runs 5
avg cpu time 2.016666666666667, avg gc time 0.3233333333333333

benchmark: test (file fread) , tranlinks: off, localf: no
executing form: (fread)
begin (826 302)
end (1541 406)
runs 5
avg cpu time 2.036666666666667, avg gc time 0.3466666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl fread-l.o]
t
=> benchmark: test (file fread) , tranlinks: on, localf: yes
executing form: (fread)
begin (122 204)
end (818 303)
runs 5
avg cpu time 1.99, avg gc time 0.33

benchmark: test (file fread) , tranlinks: off, localf: yes
executing form: (fread)
begin (820 303)
end (1535 402)
runs 5
avg cpu time 2.053333333333333, avg gc time 0.33

nil
=> Franz Lisp, Opus 38.81

=> [load fread.l]
[fasl benchmac.o]
t
=> benchmark: test (file fread) , tranlinks: on, interpreted 
executing form: (fread)
begin (151 215)
end (866 315)
runs 5
avg cpu time 2.05, avg gc time 0.3333333333333333

benchmark: test (file fread) , tranlinks: off, interpreted 
executing form: (fread)
begin (871 315)
end (1609 414)
runs 5
avg cpu time 2.13, avg gc time 0.33

nil
=> --- cpu usage: 1:44pm up 1 day, 22:13, 4 users, load average: 2.10, 1.49, 0.94
--- end of benchmark fread


--- Benchmark fread run on ucbmike at Sun Oct 9 13:20:17 PDT 1983 by jkf
--- cpu usage: 1:20pm up 2 days, 3:52, 2 users, load average: 0.72, 0.40, 0.18
Franz Lisp, Opus 38.79
-> [fasl fread.o]
t
-> benchmark: test (file fread) , tranlinks: on, localf: no
executing form: (fread)
begin (88 324)
end (931 481)
runs 5
avg cpu time 2.286666666666667, avg gc time 0.5233333333333333

benchmark: test (file fread) , tranlinks: off, localf: no
executing form: (fread)
begin (936 481)
end (1851 711)
runs 5
avg cpu time 2.283333333333333, avg gc time 0.7666666666666667

nil
-> Franz Lisp, Opus 38.79
-> [fasl fread-l.o]
t
-> benchmark: test (file fread) , tranlinks: on, localf: yes
executing form: (fread)
begin (85 323)
end (923 478)
runs 5
avg cpu time 2.276666666666667, avg gc time 0.5166666666666667

benchmark: test (file fread) , tranlinks: off, localf: yes
executing form: (fread)
begin (928 478)
end (1842 709)
runs 5
avg cpu time 2.276666666666667, avg gc time 0.77

nil
-> Franz Lisp, Opus 38.79
-> [load fread.l]
[fasl benchmac.o]
t
-> benchmark: test (file fread) , tranlinks: on, interpreted 
executing form: (fread)
begin (110 327)
end (1024 557)
runs 5
avg cpu time 2.28, avg gc time 0.7666666666666667

benchmark: test (file fread) , tranlinks: off, interpreted 
executing form: (fread)
begin (1031 557)
end (1954 793)
runs 5
avg cpu time 2.29, avg gc time 0.7866666666666666

nil
-> --- cpu usage: 1:22pm up 2 days, 3:54, 2 users, load average: 1.01, 0.64, 0.31
--- end of benchmark fread


∂09-Oct-83  1349	jkf@ucbkim 	stak    
Received: from UCBKIM by SU-AI with TCP/SMTP; 9 Oct 83  13:48:58 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA20699; Sun, 9 Oct 83 13:52:26 PDT
Date: Sun, 9 Oct 83 13:52:26 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310092052.AA20699@ucbkim.ARPA>
To: rpg@su-ai
Subject: stak
Cc: 

--- Benchmark stak run on ucbkim at Sat Oct 8 02:36:17 PDT 1983 by jkf
--- cpu usage: 2:36am up 3:06, 0 users, load average: 1.22, 1.22, 1.17
Franz Lisp, Opus 38.81

=> [fasl stak.o]
t
=> benchmark: test (file stak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (43 173)
end (422 173)
runs 1
avg cpu time 6.316666666666667

benchmark: test (file stak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (424 173)
end (1489 173)
runs 1
avg cpu time 17.75

nil
=> Franz Lisp, Opus 38.81

=> [fasl stak-l.o]
t
=> benchmark: test (file stak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (42 174)
end (232 174)
runs 1
avg cpu time 3.166666666666667

benchmark: test (file stak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (233 174)
end (422 174)
runs 1
avg cpu time 3.15

nil
=> Franz Lisp, Opus 38.81

=> [load stak.l]
[fasl benchmac.o]
t
=> benchmark: test (file stak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (58 175)
end (4744 175)
runs 1
avg cpu time 78.1

benchmark: test (file stak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (4746 175)
end (9472 175)
runs 1
avg cpu time 78.76666666666667

nil
=> --- cpu usage: 2:39am up 3:10, 0 users, load average: 1.29, 1.18, 1.16
--- end of benchmark stak


--- Benchmark stak run on ucbmatisse at Fri Oct 7 13:01:04 PDT 1983 by jkf
--- cpu usage: 1:01pm up 4 days, 47 mins, 4 users, load average: 1.08, 1.15, 1.28
Franz Lisp, Opus 38.81

=> [fasl stak.o]
t
=> benchmark: test (file stak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (123 202)
end (794 202)
runs 1
avg cpu time 11.18333333333333

benchmark: test (file stak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (797 202)
end (2356 202)
runs 1
avg cpu time 25.98333333333333

nil
=> Franz Lisp, Opus 38.81

=> [fasl stak-l.o]
t
=> benchmark: test (file stak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (122 200)
end (425 200)
runs 1
avg cpu time 5.05

benchmark: test (file stak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (427 200)
end (728 200)
runs 1
avg cpu time 5.016666666666667

nil
=> Franz Lisp, Opus 38.81

=> [load stak.l]
[fasl benchmac.o]
t
=> benchmark: test (file stak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (142 197)
end (8130 197)
runs 1
avg cpu time 133.1333333333333

benchmark: test (file stak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (8134 197)
end (16133 197)
runs 1
avg cpu time 133.3166666666667

nil
=> --- cpu usage: 1:07pm up 4 days, 53 mins, 4 users, load average: 1.52, 1.25, 1.27
--- end of benchmark stak


--- Benchmark stak run on ucbmike at Fri Oct 7 14:23:04 PDT 1983 by jkf
--- cpu usage: 2:23pm up 4:54, 0 users, load average: 1.01, 1.03, 1.03
Franz Lisp, Opus 38.79
-> [fasl stak.o]
t
-> benchmark: test (file stak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (90 324)
end (760 324)
runs 1
avg cpu time 11.16666666666667

benchmark: test (file stak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (764 324)
end (2444 324)
runs 1
avg cpu time 28.0

nil
-> Franz Lisp, Opus 38.79
-> [fasl stak-l.o]
t
-> benchmark: test (file stak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (93 324)
end (634 324)
runs 1
avg cpu time 9.016666666666667

benchmark: test (file stak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (636 324)
end (1177 324)
runs 1
avg cpu time 9.016666666666667

nil
-> Franz Lisp, Opus 38.79
-> [load stak.l]
[fasl benchmac.o]
t
-> benchmark: test (file stak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (112 327)
end (9447 327)
runs 1
avg cpu time 155.5833333333333

benchmark: test (file stak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (9452 327)
end (18798 327)
runs 1
avg cpu time 155.7666666666667

nil
-> --- cpu usage: 2:30pm up 5:02, 0 users, load average: 1.05, 1.03, 1.03
--- end of benchmark stak


∂10-Oct-83  0746	jkf@ucbkim 	takl    
Received: from UCBKIM by SU-AI with TCP/SMTP; 10 Oct 83  07:45:05 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA02455; Mon, 10 Oct 83 07:48:28 PDT
Date: Mon, 10 Oct 83 07:48:28 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310101448.AA02455@ucbkim.ARPA>
To: rpg@su-ai
Subject: takl
Cc: 

--- Benchmark takl run on lbl-csam at Sun Oct 9 13:58:10 PDT 1983 by jkf
--- cpu usage: 1:58pm up 2 days, 5:50, 4 users, load average: 0.48, 0.79, 0.74
Franz Lisp, Opus 38.81
-> [fasl takl.o]
t
-> benchmark: test (file takl) , tranlinks: on, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (43 174)
end (626 174)
runs 1
avg cpu time 9.716666666666667

benchmark: test (file takl) , tranlinks: off, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (628 174)
end (2108 174)
runs 1
avg cpu time 24.66666666666667

nil
-> Franz Lisp, Opus 38.81
-> [fasl takl-l.o]
t
-> benchmark: test (file takl) , tranlinks: on, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (40 170)
end (410 170)
runs 1
avg cpu time 6.166666666666667

benchmark: test (file takl) , tranlinks: off, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (411 170)
end (782 170)
runs 1
avg cpu time 6.183333333333333

nil
-> Franz Lisp, Opus 38.81
-> [load takl.l]
[fasl benchmac.o]
t
-> benchmark: test (file takl) , tranlinks: on, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (62 175)
end (34703 175)
runs 1
avg cpu time 577.35

benchmark: test (file takl) , tranlinks: off, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (34706 175)
end (69388 175)
runs 1
avg cpu time 578.0333333333333

nil
-> --- cpu usage: 2:19pm up 2 days, 6:12, 3 users, load average: 1.05, 1.15, 1.05
--- end of benchmark takl


--- Benchmark takl run on ucbmatisse at Sun Oct 9 13:54:25 PDT 1983 by jkf
--- cpu usage: 1:54pm up 1 day, 22:23, 4 users, load average: 1.33, 1.11, 0.96
Franz Lisp, Opus 38.81

=> [fasl takl.o]
t
=> benchmark: test (file takl) , tranlinks: on, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (121 201)
end (1225 201)
runs 1
avg cpu time 18.4

benchmark: test (file takl) , tranlinks: off, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (1227 201)
end (4048 201)
runs 1
avg cpu time 47.01666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl takl-l.o]
t
=> benchmark: test (file takl) , tranlinks: on, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (140 215)
end (896 215)
runs 1
avg cpu time 12.6

benchmark: test (file takl) , tranlinks: off, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (900 215)
end (1642 215)
runs 1
avg cpu time 12.36666666666667

nil
=> Franz Lisp, Opus 38.81

=> [load takl.l]
[fasl benchmac.o]
t
=> benchmark: test (file takl) , tranlinks: on, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (155 207)
end (62342 207)
runs 1
avg cpu time 1036.45

benchmark: test (file takl) , tranlinks: off, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (62347 207)
end (121727 207)
runs 1
avg cpu time 989.6666666666667

nil
=> --- cpu usage: 2:58pm up 1 day, 23:27, 2 users, load average: 1.15, 1.19, 1.43
--- end of benchmark takl



--- Benchmark takl run on ucbmike at Sun Oct 9 13:32:34 PDT 1983 by jkf
--- cpu usage: 1:32pm up 2 days, 4:04, 2 users, load average: 1.02, 0.52, 0.33
Franz Lisp, Opus 38.79
-> [fasl takl.o]
t
-> benchmark: test (file takl) , tranlinks: on, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (89 324)
end (1274 324)
runs 1
avg cpu time 19.75

benchmark: test (file takl) , tranlinks: off, localf: no
executing form: (mas |18l| |12l| |6l|)
begin (1278 324)
end (4233 324)
runs 1
avg cpu time 49.25

nil
-> Franz Lisp, Opus 38.79
-> [fasl takl-l.o]
t
-> benchmark: test (file takl) , tranlinks: on, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (88 325)
end (1059 325)
runs 1
avg cpu time 16.18333333333333

benchmark: test (file takl) , tranlinks: off, localf: yes
executing form: (mas |18l| |12l| |6l|)
begin (1062 325)
end (2034 325)
runs 1
avg cpu time 16.2

nil
-> Franz Lisp, Opus 38.79
-> [load takl.l]
[fasl benchmac.o]
t
-> benchmark: test (file takl) , tranlinks: on, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (190 394)
end (68706 394)
runs 1
avg cpu time 1141.933333333333

benchmark: test (file takl) , tranlinks: off, interpreted 
executing form: (mas |18l| |12l| |6l|)
begin (68711 394)
end (137223 394)
runs 1
avg cpu time 1141.866666666667

nil
-> --- cpu usage: 2:15pm up 2 days, 4:47, 2 users, load average: 1.07, 1.05, 0.99
--- end of benchmark takl


Wow!
I really appreciate those results. My records show that I don't have
750 or Sun times for these:

TAK, FRPOLY, TAKR, PUZZLE, DERIV, DDERIV, FDDERIV, FFT, TRAVERS, TPRINT,
TRIANG.

If I had these times, I'd have just about everything I want from Franz, and
I could declare it a total success. Please hang in there!
			-rpg-
∂12-Oct-83  0715	jkf%ucbkim@Berkeley 	tprint   
Received: from UCB-VAX by SU-AI with TCP/SMTP; 12 Oct 83  07:15:16 PDT
Received: from ucbkim.ARPA by ucbvax.ARPA (4.12/4.7)
	id AA05294; Wed, 12 Oct 83 06:37:01 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA29739; Wed, 12 Oct 83 06:30:54 PDT
Date: Wed, 12 Oct 83 06:30:54 PDT
From: John Foderaro (on an h19-u) <jkf%ucbkim@Berkeley>
Message-Id: <8310121330.AA29739@ucbkim.ARPA>
To: rpg@su-ai
Subject: tprint

--- Benchmark tprint run on ucbkim at Wed Oct 12 06:14:38 PDT 1983 by jkf
--- cpu usage: 6:14am up 1 day, 18:42, 9 users, load average: 0.94, 0.64, 0.36
Franz Lisp, Opus 38.81

=> [fasl tprint.o]
t
=> benchmark: test (file tprint) , tranlinks: on, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (72 182)
end (103 182)
runs 1
avg cpu time 0.5166666666666667

benchmark: test (file tprint) , tranlinks: off, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (104 182)
end (135 182)
runs 1
avg cpu time 0.5166666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl tprint-l.o]
t
=> benchmark: test (file tprint) , tranlinks: on, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (62 181)
end (94 181)
runs 1
avg cpu time 0.5333333333333333

benchmark: test (file tprint) , tranlinks: off, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (95 181)
end (124 181)
runs 1
avg cpu time 0.4833333333333333

nil
=> Franz Lisp, Opus 38.81

=> [load tprint.l]
[fasl benchmac.o]
t
=> benchmark: test (file tprint) , tranlinks: on, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (1006 662)
end (1039 662)
runs 1
avg cpu time 0.55

benchmark: test (file tprint) , tranlinks: off, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (1042 662)
end (1073 662)
runs 1
avg cpu time 0.5166666666666667

nil
=> --- cpu usage: 6:15am up 1 day, 18:43, 9 users, load average: 1.59, 0.88, 0.47
--- end of benchmark tprint


--- Benchmark tprint run on ucbmatisse at Mon Oct 10 07:56:19 PDT 1983 by jkf
--- cpu usage: 7:56am up 2 days, 16:25, 3 users, load average: 0.97, 0.55, 0.28
Franz Lisp, Opus 38.81

=> [fasl tprint.o]
t
=> benchmark: test (file tprint) , tranlinks: on, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (110 202)
end (162 202)
runs 1
avg cpu time 0.8666666666666667

benchmark: test (file tprint) , tranlinks: off, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (165 202)
end (217 202)
runs 1
avg cpu time 0.8666666666666667

nil
=> Franz Lisp, Opus 38.81

=> [fasl tprint-l.o]
t
=> benchmark: test (file tprint) , tranlinks: on, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (98 203)
end (151 203)
runs 1
avg cpu time 0.8833333333333333

benchmark: test (file tprint) , tranlinks: off, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (152 203)
end (204 203)
runs 1
avg cpu time 0.8666666666666667

nil
=> Franz Lisp, Opus 38.81

=> [load tprint.l]
[fasl benchmac.o]
t
=> benchmark: test (file tprint) , tranlinks: on, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (1628 1010)
end (1680 1010)
runs 1
avg cpu time 0.8666666666666667

benchmark: test (file tprint) , tranlinks: off, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (1684 1010)
end (1736 1010)
runs 1
avg cpu time 0.8666666666666667

nil
=> --- cpu usage: 7:57am up 2 days, 16:26, 2 users, load average: 1.24, 0.73, 0.37
--- end of benchmark tprint


--- Benchmark tprint run on ucbmike at Wed Oct 12 06:20:53 PDT 1983 by jkf
--- cpu usage: 6:20am up 14:32, 1 users, load average: 0.73, 0.54, 0.31
Franz Lisp, Opus 38.79
-> [fasl tprint.o]
t
-> benchmark: test (file tprint) , tranlinks: on, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (137 340)
end (236 340)
runs 1
avg cpu time 1.65

benchmark: test (file tprint) , tranlinks: off, localf: no
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (239 340)
end (336 340)
runs 1
avg cpu time 1.616666666666667

nil
-> Franz Lisp, Opus 38.79
-> [fasl tprint-l.o]
t
-> benchmark: test (file tprint) , tranlinks: on, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (116 341)
end (213 341)
runs 1
avg cpu time 1.616666666666667

benchmark: test (file tprint) , tranlinks: off, localf: yes
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (215 341)
end (314 341)
runs 1
avg cpu time 1.65

nil
-> Franz Lisp, Opus 38.79
-> [load tprint.l]
[fasl benchmac.o]
t
-> benchmark: test (file tprint) , tranlinks: on, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (725 412)
end (821 412)
runs 1
avg cpu time 1.6

benchmark: test (file tprint) , tranlinks: off, interpreted 
executing form: (print test-pattern)
((((((678.0 567.0 567.0 456.0 456.0 |345c|) |234b| (567.0 456.0 456.0 |345c| |34
qrs9 qrs9 opq8 opq8 mno7) klm6) ijk5 ((uvw1 stu0 stu0 qrs9 qrs9 opq8) mno7 (stu0
 qrs9 qrs9 opq8 opq8 mno7) klm6 (qrs9 opq8 opq8 mno7 mno7 klm6) ijk5) ghi4) efg3
) cde2) abc1)begin (826 412)
end (921 412)
runs 1
avg cpu time 1.583333333333333

nil
-> --- cpu usage: 6:22am up 14:33, 1 users, load average: 0.96, 0.67, 0.38
--- end of benchmark tprint


∂15-Oct-83  1041	jkf@ucbkim 	benchmarks   
Received: from UCBKIM by SU-AI with TCP/SMTP; 15 Oct 83  10:41:08 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA00601; Sat, 15 Oct 83 10:44:06 PDT
Date: Sat, 15 Oct 83 10:44:06 PDT
From: John Foderaro (on an h19-u) <jkf@ucbkim>
Message-Id: <8310151744.AA00601@ucbkim.ARPA>
To: rpg@su-ai
Subject: benchmarks
Cc: 

  It appears from your benchmark table that you are counting gc time too.
I find this to be very unfair as the amount of time spent in gc can be
easily controlled by preallocation of space.   I'm running these benchmarks
in our standard version of lisp which is purposefully small to permit a
large number to be run simultaneously.   Furthermore only a small portion of
the address space is preallocated, the rest being allocated upon demand.
Contrast this with NIL, which is 8 megabytes and may not even have to gc.
If you insist on adding gc times, I'll preallocate space and run them
in our vaxima sized lisp, so we get comparable times.

CPU time
How large is your initial allocation? In the final accounting, of course,
I will compare CPU, CPU + GC, Real time, etc. 
The real problem, for me, isn't to get non-GC time from you, but to get
GC time from NIL! Here is a chart with CPU times only:

     Benchmark     | 780 Franz | 750 Franz | 750 NIL | Franz 68000 |
-------------------------------------------------------------------|
                   |           |           |         |             |
       Boyer       |   40.17   |   60.55   |  81.33  |    64.38    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Browse       |   78.25   |  122.67   | 1099.84 |   137.38    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
     Destruct      |   6.95    |   8.68    |  8.95   |    13.98    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
Traverse           |           |           |         |             |
    Initialize     |   18.47   |   32.98   |  38.93  |      -      |
     Traverse      |   82.98   |  132.63   | 273.26  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
        Tak        |    8.3    |   14.8    |  4.16   |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       STak        |   6.32    |   11.18   |  23.14  |    11.17    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       CTak        |   12.05   |   18.35   |  9.92   |    20.75    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       Takl        |   9.72    |   18.4    |  39.13  |    19.75    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       Takr        |   3.62    |   5.09    |  5.71   |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       Deriv       |     -     |     -     |  22.69  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      DDeriv       |     -     |     -     |  26.9   |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Fdderiv      |     -     |     -     |  26.45  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
Div2               |           |           |         |             |
     Iterative     |    3.2    |   5.37    |  9.44   |    7.95     |
     Recursive     |   7.68    |   11.3    |  14.7   |    11.13    |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
        FFT        |     -     |     -     |  35.59  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Puzzle       |     -     |     -     | 497.85  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Triang       |     -     |     -     | 649.73  |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Fprint       |   0.62    |   1.23    |  37.65  |    1.43     |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
       Fread       |   1.17    |   2.02    |  27.78  |    2.29     |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
      Tprint       |   0.52    |   0.87    |  47.55  |    1.65     |
                   |           |           |         |             |
-------------------------------------------------------------------|
                   |           |           |         |             |
Frpoly             |           |           |         |             |
 Power = 2         |           |           |         |             |
  r=x+y+z+1        |   0.02    |   0.05    |  0.04   |      -      |
  r2=1000*r        |   0.02    |   0.03    |  0.15   |      -      |
  r3=r in flonums  |   0.03    |   0.03    |  0.05   |      -      |
                   |           |           |         |             |
 Power = 5         |           |           |         |             |
  r=x+y+z+1        |   0.02    |   0.35    |  0.36   |      -      |
  r2=1000*r        |   0.38    |   1.18    |  2.15   |      -      |
  r3=r in flonums  |   0.22    |   1.02    |  0.42   |      -      |
                   |           |           |         |             |
 Power = 10        |           |           |         |             |
  r=x+y+z+1        |   2.33    |   5.37    |  3.85   |      -      |
  r2=1000*r        |    8.0    |   14.95   |  38.71  |      -      |
  r3=r in flonums  |    2.6    |   7.27    |  4.64   |      -      |
                   |           |           |         |             |
 Power = 15        |           |           |         |             |
  r=x+y+z+1        |   20.6    |   37.23   |  24.93  |      -      |
  r2=1000*r        |   86.5    |  155.45   | 479.48  |      -      |
  r3=r in flonums  |   25.08   |   41.47   |  30.7   |      -      |
                   |           |           |         |             |
-------------------------------------------------------------------|
T 
∂08-Oct-83  1946	JONL.PA@PARC-MAXC.ARPA 	fyi   
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 8 Oct 83  19:44:31 PDT
Date: 8 OCT 83 19:38 PDT
From: JONL.PA@PARC-MAXC.ARPA
Subject: fyi
To: rpg@sail.ARPA

Date:  4 OCT 83 01:09 PDT
From: JONL.PA
Subject: Reminders from RPG's presentation last Thursday
To:   LispCore↑


Here is a not-at-all complete list of some things we wanted to be reminded of,
after hearing Dick's presentation last Thursday.

Items brought out by the TRAVERSE benchmark:
  Try pointers for the flags instead of FLAGs
  Try to find out how successful it would be if the compiler had
    1) a better/shorter means to convert a FLAG field into True-or-False
    2) a "Jump on zero" instruction, or a variety of other such conditional
       jumps, to reduce the overhead on tight loops
  Find out how much FVAR lookup is costing -- the toplevel call here binds a
    fvar, COUNT, and each recursive call updates it.  thus each frame ought to
    have to look only one frame back to find the location of the binding cell.
  Try CLISPDEC fast for FFETCHFIELD and FREPLACEFIELD  [these runs were
    made in my "abc" loadup, so already have the fast fetchfield declaration 
    on.  We should re-run them with fast replacefield too.]
  How big can this test be [I was able to create a structure of depth 175, but
    not much bigger, some time ago.  Limit is primarily virtual address space.
    Maybe this could go deeper now if the new MDS scheme is working.  Time on 
    a Dorado for this creation was 43 seconds; time for 250 traversals was 112
    secs.]

Items brought up by the FFT benchmark:
  I gathered extensive statistics last February, and sent out a note to 
    LispCore↑ on the results (stats are still on <gabriel> directory).  Blame 
    was focused mostly on GC-related activities -- e.g., \GCMAPTABLE, 
    \GCMAPSCAN, \GCRECLAIMCELL, MAKEFLOATNUMBER, \HTFIND etc.  Spectrum was
    slightly different between Dorado/Dolphin again due to differences in 
    microcode support.  E.g., \GCMAPTABLE cost was 77.6% of the total time on
    the Dorado, but only 0.15% on Dolphin ! 
  Could a "dirty" sysout explain the GC lossage?  I.e., as one continues to 
    run, more and more things seem to clog the GC refcount table, so maybe ...
    But the stats I took in February contained 4 cases -- Clean Dorado, Dirty
    Dorado, Clean Dolphin and Dirty Dolphin.  Differences were in the 7% range.
    A tabulation of the data is in the note mailed out in Feb (with subject 
    line something like "FFT:  The Baaaad news")
  What's the differential between using the CMLARRAY arrays and standard
    Interlisp arrays?  For the FFT benchmark, which does a lot of array work,
    the advantage of "open coding" the array access was obscured by the GC
    times;  but the PUZZLE benchmark, using CMLARRAY, runs in 14.1 seconds
    on a (display-down) Dorado, whereas EPUZZLE, using ELT and SETA, runs
    on a (display-down) Dorado in 34.0 seconds -- an overall improvement of a
    factor of 2.5.   This picture would change if ELT/SETA were in microcode,
    and AREF/ASET were not; but considering the extended functionality of 
    the latter, we might do well to consider putting them into microcode 
    first (and just "punt"ing on performance on Interlisp-10).
    
Items brought out by our "bewilderment" at certain comparisons:
  Why is VAX/NIL so much slower than Franz on the TAK benchmark?
    I suggested it was due to the fact that NIL builds a true function frame, 
    so that Lisp-written debuggers can work -- Franz just uses the "C" stack,
    which has essentially nothing on it except args and return-addresses (and
    of course the hardware stuff put on by the vax CALLS instruction)
  Why is SAIL so much slower (relatively speaking) on certain CONS-heavy
    benchmarks, such as TRAVERSE, DERIV etc.   Dick said that GC time is
    being reported by SAIL times, but most other systems have excluded it.
  Why does the Dorado compare so poorly on the DERIV and DDERIV
    benchmarks with the Dolphin?  I see the answer in the stats -- I'm not 
    sure I had copies of them with me Thurs morning, but they're in my files
    and on the <gabriel> directory.  For example, on DDERIV, of the top 4 funs
    on the Dorado run, three appear to be losing due to differences in amount
    of microcode support:
        Dorado rank      Dorado percentage    Dolphin percentage
      1. \GCRECLAIMCELL       9.56%                 1.93%
      2. \GCMAPSCAN           9.46%                 7.40%
      3. \FREELISTCELL        8.87%                 0.41%
      4. \RPLCONS             8.13%                 0.46%
    On the other hand, FINDPTRSBUFFER consumed almost 20% of the Dolphin
    run, but only 3.84% of the Dorado run.  I don't have any ideas on this one.
    The LM2 seemed to shine on these benches -- at first we thought it might
    have been due to the fact that they have (the equivalent of) GETP in
    microcode.   DDERIV is the "data-driven" program, which one would think
    would call GETP a lot; but the stats show
           function          Dorado percentage     Dolphin percentage
          GETPROP                2.03%                   2.83%
          GETPROPLIST             .87%                   1.11%

Items brought up under "what could we suggest as a benchmark, to show what
  kinds of things the group has been working on the past couple of years":
  Use of windows on a bit-mapped display
    1) How about line-drawing?  e.g., the POLY demonstration program
    2) How about a bunch of windows, moving about (e.g., the WINKER or
       MOVER demos from AAAI), or "popping up from beneath one another".
  Swapping performance;  how about plotting a curve of performance over time
    as the BOYER benchmark is repeated, say, hundreds of times.  Systems that 
    don't GC at all will either die or degrade substantially on swapping.
  GC costs -- most of the other systems just run willy-nilly, doing 
    essentially none of the GC requirements.  Interlisp-D is continuously 
    paying the penalty due to the reference count scheme.  How about something
    that would force the GC to be factored in also?
  Multi-processing.  Let's call this "multi-tasking", or "co-routining".  
    Either way, we could compare the time to run one of Dick's standard 
    benchmarks three times in a row (serially) with that required to run three
    copies of it "in parallel" by multi-tasking.  The increase would be the 
    cost of using the multi-tasking facilities.  Explicit places in the 
    benchmark code to be multi-tasked would have "yield" calls in them; this 
    way, we could rule out a fake multi-tasking which just ran one to 
    completion, then the next, and so on.



Things yet to do:
  Some more Stats printouts are needed.  The following benchmarks already have
    .PRINTOUT files on the <GABRIEL> directory (to which LispCore↑ has
    access):  TAK, DESTRUCT, DERIV, DDERIV, and FFT.    I ran BOYER again
    this evening, trying to collect stats;  doradodisk crashed out of space 
    after about four megabytes of .stats file.  Due to a bug in the stats 
    collector, I haven't been able to generate the .printout file from this 
    remnant.  I reported this bug last February when we first tried to stats 
    out the BOYER program;  Bill said he'd take a look at it "soon", since he 
    is hacking the stats package for other reasons right now.
  Another look has to be taken at the ETRIANG code.  The figures didn't come
    out at all reasonable for it -- I probably flubbed some typo in massaging
    the TRIANG code (ETRIANG is just the TRIANG benchmark using ELT and SETA 
    rather than CMLARRAY.)
  We need more benchmarks for arithmetic.  There  is one series I proposed for
    quotient -- HASHPASSWORD16 and HASHPASSWORD29 -- which are versions of the
    algorithm used in the NS world (and in Interlisp-D too!) to encrypt a 
    user's password.  The ...16 version uses 16-bit arithmetic, and the ...29
    version uses at most 29-bit arithmetic (so that Interlisp/VAX can run it 
    -- that's why it isn't full 32-bit arithmetic).   We also have to convert
    to Interlisp the extensive MACHAR file -- machine architecture -- which 
    "snoops" at the machines ability to do faithful floating point operations.
    MacLisp code for it is on <gabriel> directory.    [p.s. I've already 
    done the HASHPASSWORD series, and it suggests that substantially more 
    microcode support is available for large integers (between 16 and 32 bits)
    in the Dolphin than in the Dorado; possibly same would be true if Dolphin 
    were compared to current DLion?]
  We need BIGNUMS, so that we can run the FRPOLY benchmark
  We need to assess some priorities on what to work on.  Dick suggested that
    improvement in the BROWSE and TRAVERSE benchmarks would impress
    Teknowledge, since they are doing these structure-groveling and pattern-
    matching kinds of operations.  FFT (maybe) would have impressed Fairchild 
    and Atari were it better.

∂15-Oct-83  1227	jkf@ucbkim 	Re: CPU time 
Received: from UCBKIM by SU-AI with TCP/SMTP; 15 Oct 83  12:27:18 PDT
Received: by ucbkim.ARPA (4.6/4.2)
	id AA02242; Sat, 15 Oct 83 12:30:25 PDT
Date: Sat, 15 Oct 83 12:30:25 PDT
From: John Foderaro (on an aaa-60-s) <jkf@ucbkim>
Message-Id: <8310151930.AA02242@ucbkim.ARPA>
To: RPG@SU-AI, jkf@ucbkim
Subject: Re: CPU time 
Cc: 
In-Reply-To: Your message of 15 Oct 83  1126 PDT

  Our 'standard' lisp starts out with 50 K bytes of list space, of which
half is used, leaving around 3K list cells free.
  In the standard lisp the lisp space can grow to about 2300K bytes of
list space.  If I preallocated that much at the beginning,
I would get 288K free list cells.
  The vaxima sized lisp is approximately twice as large, so you can double
the figures for the standard lisp.
  

To JKF on Nov 30, 1983
Hi 
I'm trying to finish up the benchmarking game by mid-january. Here's
what I still need from Franz. Because Franz is the standard against which many
Lisps are measured, I'd like to have the best results for it.

Traverse for 68000
Tak for 68000
Takr for 68000
Deriv, Dderiv, Fdderiv for 780, 750, 68000 (I have some results for
them, but not all combinations of Translinks and localfs)
FFT for everything
Puzzle for everything
Triang for everything

I hope you can find the time to batch these through for me within the
next month or so. Thanks for your help in this project.
			-rpg-
∂09-Nov-83  2240	SCHOEN@SUMEX-AIM.ARPA 	Performance comparisons on D-machines with expert systems
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 9 Nov 83  22:39:57 PST
Date: Wed 9 Nov 83 21:54:42-PST
From: Eric Schoen <Schoen@SUMEX-AIM.ARPA>
Subject: Performance comparisons on D-machines with expert systems
To: 1100users@SUMEX-AIM.ARPA

I can offer a little information about the Dipmeter Advisor's performance on
all three D-machines.  Although not based on EMYCIN, I would assume it
exercises a similar computational mix.  A significant difference which does
come to mind, however, is that the Advisor is used in more of a production
mode than a development mode (I don't know how important this is, though).

The Advisor performs several types of computations.  Chief among these
are:

	Data reduction, involving intensive matrix calculations 
	over several thousand data points making up the pre-
	computed formation dip data.

	Forward-chained deductions, utilizing a collection of
	some 90 rules, driven by pattern detection routines
	which are currently implemented by Interlisp code.
	Deductions are represented by tokens on a global blackboard.

	Highly interactive well-log graphics, offering both 
	smooth and jump scrolling, zooming, and interfaces into
	the data inspector for examination of detected patterns.

Overall, the system is implemented as a set of 11 discrete phases in
which distinct interpretation techniques are utilized.  Between each
phase, the system checkpoints itself by writing its collection of
active tokens to a "phase" file.

We run the system on each of the Dorado, Dolphin, and Dandelion.  As
advertised, no software changes are necessary to switch between machines.
All system development was performed on a Dolphin, where real memory was the
key to performance.  With 2 MB, the Advisor's performance is quite good
(Well-log interpretation is a slow process, even when running simple FORTRAN
jobs -- the environment tends to be batch- oriented on a heavily overloaded
timesharing system, such as a VAX 11/780).  Most interpretation phases,
especially those in which the computation tends to be more symbolic than
numeric, run in under a minute.  A great deal of time is spent writing the
phase files, which are simply PRIN1'd expressions.

The system's performance is roughly the same on the Dandelion, running
currently with 1.5MB of real memory.  There are a few differences; phase
files are written and later read back in somewhat faster (even with having to
go through a gateway to get to our 3 MB file server).  On the other
hand, interactive graphics is a bit more sluggish.  We find we are
paging more (smooth scrolling is realized through the use of precomputed
bitmaps, each 3 screenfuls high), and that the BITBLT function is 
slightly slower.  Floating point calculations are significantly slower,
as is to be expected given no microcode support for these operations yet.
The Dandelion is the ultimate delivery vehicle for the Advisor in
field use.  As far as we can tell, with the addition of microcode and
greater real memory, the Dandelion should outperform the Dolphin in
almost every respect when running the Advisor.

As you'd expect, the Dorado executes the Advisor an order of magnitude
faster than the Dolphin.  The only exception seems to be floating point
calculations, which are only twice as fast.  The apparent discrepancy
here is explained by the Dorado's not having a microcoded instruction
to box floating point numbers at the tail end of a microcoded floating point
instruction.

Eric
-------

∂28-Nov-83  2154	SCHOEN@SUMEX-AIM.ARPA 	Dolphin-Dandelion performance comparison  
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 28 Nov 83  21:54:35 PST
Date: Mon 28 Nov 83 21:24:46-PST
From: Eric Schoen <Schoen@SUMEX-AIM.ARPA>
Subject: Dolphin-Dandelion performance comparison
To: 1100users@SUMEX-AIM.ARPA

I recently performed a mini benchmark comparing the performance of the
Dolphin and Dandelion.  The task just involved generating the graph
nodes to be fed to LAYOUTFOREST to display a progeny tree of a small
Strobe knowledge base (about 15-20 objects, reasonably well balanced).
The benchmark is skeletally:

	(DEFINEQ (GENERATE-TREE (LAMBDA (NODE)
		 (DECLARE (GLOBALVARS $$NODENAMES $$NODELST))
		 (for OBJECT in (PROGENY NODE) do
			(TCONC $$NODELST (create GRAPHNODE ...))
			(TCONC $$NODENAMES OBJECT)
			(GENERATE-TREE OBJECT)
		  unless (FMEMB OBJECT (CAR $$NODENAMES))))

Running in the same release of Interlisp on both machines, and using
TIMEALL with 100 repetitions of the f function, the Dandelion was
reliably 2.4 times faster in CPU time only, and about 1.6 times faster
in GC time.  I modified the benchmark a couple of times, replacing the
(efficient) call to PROGENY with messages to the object's SPECIALIZATION
slot, and then to the GET facet of the SPECIALIZATION slot (the last
version requiring a failed inheritance search and a second search up
the "datatype" hierarchy).  The message benchmarks involve searching
and list generation, and take respectively greater and greater CPU
time.  In addition, each generates respectively more garbage.  The
Dandelion maintained its 2.4 times speed advantage over the Dolphin
execution time, and its 1.6 times advantage in garbage collection.

Eric
-------

∂08-Dec-83  1241	CL.BOYER@UTEXAS-20.ARPA 	MREWRITE on 3600s Release 5.0 
Received: from UTEXAS-20 by SU-AI with TCP/SMTP; 8 Dec 83  12:41:28 PST
Date: Thu 8 Dec 83 14:41:12-CST
From:  Bob Boyer <CL.BOYER@UTEXAS-20.ARPA>
Subject: MREWRITE on 3600s Release 5.0
To: rpg@SU-AI.ARPA, dcp@MIT-MC.ARPA, cs.novak@UTEXAS-20.ARPA
cc: cmp.cohen@UTEXAS-20.ARPA, cmp.mksmith@UTEXAS-20.ARPA,
    cmp.good@UTEXAS-20.ARPA, cl.moore@UTEXAS-20.ARPA

We're beta testing Release 5.0 on the 3600.  The following
table reflects the new times for our MREWRITE benchmark. The
3600 equals a Dorado with the gc-on and is considerably
faster with the gc-off.  This is a dramatic improvement over
the results of Release 4.1.  Our experience confirms the
general experience that in normal interactive use, one can
easily leave the gc off all day.  When the IFU arrives,
if the average 30-40% speed up occurs, a 3600 will run
at 2060 Maclisp speeds.

MACLISP (2060)                                8.5        5.3    13.8
UCILISP (2060, (nouuo nil))                   9.3        4.7    14
INTERLISP (bcompl, blklib, swap, 2060)       11          6      17
ELISP  (2060)                                11          7      18
INTERLISP (bcompl, blklib, noswap, 2060)     11          7.5    18.5
ZETALISP (3600, gc off, 4mb,  Rel 5.0)                          19
INTERLISP (Dorado, bcompl, blklib)           18         11      29
ZETALISP (3600, gc on, 4meg, Rel 5.0)                           30.5
FRANZ (all localf, VAX-780)                  21         18      39
INTERLISP (bcompl, no blklib, noswap, 2060)  39          7      46
FRANZ (translink=on, VAX-780)                37         17      54
PSL   (HP 9836, MC68000 8mb)                 (breakdown n/a)    67
INTERLISP (bcompl, no blklib, swap, 2060)    64          6      70
INTERLISP (VAX-780)                          80          3      83
ZETALISP (LM-2, 1meg, gc-on)          breakdown not available   97
PSL   (HP 9836, MC68000 3mb)                 (breakdown n/a)   114
FRANZ VAX-780 (translink=nil)               130         18     148
INTERLISP (Dolphin, 1meg, bcompl, blklib)   132         35     167

Further details of these tests can be found in
[UTEXAS-20]<CL.BOYER>LISP.COMPARISONS.
-------

∂26-Jan-84  1048	WVANROGGEN@DEC-MARLBORO.ARPA 	common lisp benchmarks   
Received: from DEC-MARLBORO by SU-AI with TCP/SMTP; 26 Jan 84  10:47:09 PST
Date: Thu 26 Jan 84 13:32:44-EST
From: "Walter van Roggen" <WVANROGGEN@DEC-MARLBORO.ARPA>
Subject: common lisp benchmarks
To: rpg@SU-AI.ARPA, pw@SU-AI.ARPA

Sorry I didn't reply earlier, but I had gone on vacation for a week just
as you had sent me the code.

I found that there were a bunch of things that were non-Common Lisp
in the code you sent. Besides things like ASSQ, there was a use of
something like (APPLY #'QUOTE ...) which is quite illegal. But some
things ran just fine, I just added
(PROCLAIM '(OPTIMIZE (SPEED 3) (SAFETY 0) (SPACE 0)))
at the top of the file and sprinkled in a few fixnum declarations
where it seemed obvious. I still haven't tried half of the files,
and haven't even thought about converting DDERIV.

I haven't done a thorough job yet of timing the ones I have tried,
either, but I can give approximate timings for some. These were
run on a (loaded) VAX-11/780 with a a working set limit of 1500
pages, but I think actually ran with more due to a much larger
working set extent. I'm not sure how to treat GC timings.
I'll mention whether they occurred from a fresh lisp. I generally
ran things a couple of times for an average. Times are seconds of
CPU as measured with the TIME macro (yes, Common Lisp does specify
timing functions).

BOYER	50 (no gc)
BROWSE	64 (no gc)
CTAK	22
DERIV	27 (with 1 gc, ~13 seconds)
DESTRU	7.6 (no gc)
DIV2	4.7 (no gc)
	8.4 (no gc)
FRPOLY	.13
	1.2
	26
	95 (I don't remember any GC's, but there may have been some;
	    if so, I've subtracted an estimate of their times before
	    averaging)
STAK	9.7
TAKL	23
TAKR	3.6
TRTAK	2.26
TAK	1.96

I also tried TRIANG, but gave up after 10 (?) cpu minutes.
GC time typically depends on how much non-garbage there is (it's
stop and copy (compacting of course)). The TAK functions needed
(PROCLAIM '(FTYPE (FUNCTION (FIXNUM FIXNUM FIXNUM) FIXNUM) tak trtak)).
We don't do tail recursion removal yet. We'll be speeding up
special variable access significantly sometime. The catch/throw
mechanism is going to be redone eventually. More immediately,
array accessing needs a lot of work.

I didn't get a copy of the Franz versions of the benchmarks.
Could you send them too? More interesting would be your figures
for the times of all the different implementations you happen
to have now.

			---Walter
-------

∂23-Jan-84  1344	@SUMEX-AIM.ARPA:VANBUER@USC-ECL 	performance of transcendental functions in Interlisp
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 23 Jan 84  13:43:57 PST
Received: from USC-ECL by SUMEX-AIM.ARPA with TCP; Mon 23 Jan 84 13:11:19-PST
Date: 23 Jan 1984 1306-PST
From: VANBUER%USC-ECL@SRI-NIC
Subject: performance of transcendental functions in Interlisp
To:   1100users at SUMEX-AIM

There has been a program circulating on USENET to test the speed and accuracy
of transcendental functions on various systems.  The Interlisp version is:

TANSPEED [LAMBDA NIL
  (PROG((A 1.0) I)
	(for old I from 1 to 2499 do
		(SETQ A (FPLUS (TAN (ARCTAN [ANTILOG (LOG (SQRT (FTIMES A A]
					    T)
				    T)
				1.0)))
	(printout T "LOOPS =" I " RESULT = " A T]

I have run this on the 2060 at ECL, Vax interlisp at ISI and on my 2 MByte
Dolphin (both original and newly written versions of transcendental fns)
and for comparison, VAX C double-precision library on a 780.  In every case,
the value of loops is 2500 at completion.

Summary:
Machine		Time(sec)	Result A	Average float boxes/iteration
-------		---------	--------	-----------------------------
2060 Lsp interp	8.4		2514.995	7
 "    "  compil 3.7		  "		7
 "    " SETN	4.3		   "		6

VAX 780 L int	127		2516.6		100
 "   "  L comp  102		  "		100
 "   "  C dbl	3.9		2500.00000012	NA

Dolphin interp	487		-11889.27 *1	160
Dol new transc.
	interp	235		2476.246	91
	compil	175		  "		91

Note *1  The original dolphin routines had gradually run A up to 10183.65 by
iteration 2208, at which point ARCTAN was so close to the singularity given
its precision, it crept past pi/2.

The original Dolphin routines use linear interpolation for the "principle"
range of most functions, the new routines use polynomial approximations taken
from the numerical analysis literature.  LOG, ANTILOG and SQRT also use
representation-specific tricks to substantially speed up initial ranging.

The Interlisp 10 transcendental routines are in DEC-10 assembler format,
with the algorithms taken from the Fortran library.  Note that the only
number boxing is the result of each function call, with registers and local
variables of the assembly holding all other intermediate.

The Interlisp D is forced to do all its floating point arithmetic with
large number boxes, which makes up a large part of the time.

The VAX versions are also clearly doing all of their floating point arithmetic
in Lisp number boxes, as can be seen from the number of boxes per iteration and
20 times slower than C (actually, it's worse, to get double precision accuracy,
the C routines need longer polynomials).

Accuracy: the DEC10 and VAX show almost comparable accuracy with their 36 bit
and 32 bit representations, both are about as good as can be done with
available precision.  The Dolphin's slightly lower accuracy is primarily a
result of the 7-digit print format for floating mantissa, making it impossible
to specify the full 24-bit mantissa accuracy available for the constants in
the various polynomials (worst for constants like pi/2=1.57... where only
21 bits can be specified).
The original D routines use of linear interpolation limits accuracy in
areas of high second derivatives (e.g. SIN near 90 deg, exp near 1).
	Darrel J. Van Buer, SDC
ps:  The improved versions for the Dolphin will be sent to MAXC in a few days.	DJVB
-------

∂23-Jan-84  1803	@SUMEX-AIM.ARPA:JONL.PA@PARC-MAXC.ARPA 	SQRT et al. performance in Interlisp-D  
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 23 Jan 84  18:03:14 PST
Received: from PARC-MAXC.ARPA by SUMEX-AIM.ARPA with TCP; Mon 23 Jan 84 17:25:52-PST
Date: 23 JAN 84 17:16 PST
From: JONL.PA@PARC-MAXC.ARPA
Subject: SQRT et al. performance in Interlisp-D
To: vanBuer%USC-ECL@SRI-NIC.ARPA
cc: 1100Users@SUMEX-AIM.ARPA


Re your message of 23 Jan 84 13:06 PST

Shortly after the last release (the Fugue.4 one) of Interlisp-D, I got
"bitten" by the lack of accuracy/speed in SQRT, and replaced it by a
fairly optimal one.  [[if this isn't in the Carol release message, it 
should be]].

I couldn't say for sure now that the other functions you sent us will
make it into the Carol release (any day now!!), but I'll pore over them
to be sure they can be included in the release after Carol.


--------------------

∂25-Jan-84  1737	@SUMEX-AIM.ARPA:VANBUER@USC-ECL 	vax transcendental update  
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 25 Jan 84  17:37:39 PST
Received: from USC-ECL by SUMEX-AIM.ARPA with TCP; Wed 25 Jan 84 17:04:05-PST
Date: 25 Jan 1984 1659-PST
From: VANBUER%USC-ECL@SRI-NIC
Subject: vax transcendental update
To:   1100users at SUMEX-AIM

Word from the implementers of VAX interlisp at ISI is that the transcendental
routines do escape to C, but SQRT was overlooked, so almost all the time
was spent in SQRT.  Repeating the run without SQRT leads to an estimate
for timing of 28 seconds and 13 seconds respectively for interpreted and
compiled execution, once SQRT is fixed (in place of 126 and 101 seconds).

I also received word from Xerox that Mesa has a microcoded SQRT for the Dolphin
with speed comparable to float QUOTIENT, but also that Lisp has absolutely no
space for it  [probably why DRAWCIRCLE is so slow!]
	Darrel J. Van Buer
-------

∂19-Jan-84  1643	MASINTER.PA@PARC-MAXC.ARPA 	varia  
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 19 Jan 84  16:43:24 PST
Date: 19 JAN 84 16:43 PST
From: MASINTER.PA@PARC-MAXC.ARPA
Subject: varia
To: RPG@SU-AI.ARPA

I was out of town the last couple of days (I went to POPL in 
Salt Lake City.)

I think you can run any benchmarks you want, as far as I am concerned.

What do you think the issue is? (I.e., the dandelion is released
and Stanford has some, etc.)

The "wide bodied" machine is of course a different issue since
there aren't any out in the world.

---

The issue is  (was) that even though Schlumberger had dandelions a long time
ago, you didn't want me to touch them then. I'm trying to live up to the
spirit of our agreement.

CSLI has some, but I doubt that CSD does.

WideBody: In that event, I will do the tests on it as part of the HPP
benchmarking. I'll release the numbers to no one but you and HPP until
you've delivered one. Or you can let me do them at our leisure with the
same arrangement.

Shall I feel free to publish the dorado numbers? I will redo them with Jonl's
improvements later.
			-rpg-
∂28-Dec-83  0843	@SUMEX-AIM.ARPA:VANBUER@USC-ECL 	Re: Why so slow???    
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 28 Dec 83  08:43:25 PST
Received: from USC-ECL by SUMEX-AIM.ARPA with TCP; Wed 28 Dec 83 08:36:06-PST
Date: 28 Dec 1983 0832-PST
From: VANBUER%USC-ECL@SRI-NIC
Subject: Re: Why so slow???
To:   "Brand Hal" at LLL-MFE, DOLPHIN-USERS at SUMEX-AIM
cc:   VANBUER at USC-ECL

In response to the message sent  Tue, 27 Dec 83 16:12 PST from "Brand Hal"@LLL-MFE.ARPA

An even better way to speed up I/O is to use streams (which now
also work in Interlisp 10 and Interlisp VAX) e.g.

(lambda(f)
	(prog ((inf(GETSTREAM F 'INPUT))
		(outf(GETSTREAM NIL 'OUTPUT)))
	      (RETURN(PROG1(until (EOFP inf) count (BOUT outf (BIN inf)))
			   (CLOSEF inf)))))

Even using higher level functions like PRINT, streams are still faster since
the ultimate microcode-level stream support needs a stream, and it will be
passed down by PRINT.
BIN and BOUT never leave microcode when a stream is provided; otherwise they
trap out to the slow macrocoded versions (but which have all the error checks
and coercions needed.  Actually it will still leave microcode for block
boundaries, etc, but these are minor cases)
	Darrel
-------

∂28-Dec-83  1040	SCHMIDT@SUMEX-AIM.ARPA 	Interlisp-10 streams 
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 28 Dec 83  10:40:07 PST
Date: Wed 28 Dec 83 10:17:32-PST
From: Christopher Schmidt <SCHMIDT@SUMEX-AIM.ARPA>
Subject: Interlisp-10 streams
To: Dolphin-Users@SUMEX-AIM.ARPA

	Note that to use Darrel's stream approach in Interlisp-10, you must
load the DFOR10.COM lispusers package first.  Note that in Interlisp-10,
EOFP and CLOSEF require the filename instead of the stream number (JFN).
     --Christopher
3←(DEFINEQ (TEST (LAMBDA (F)
    (bind (INFP ← (OPENSTREAM F 'INPUT)) (OUTFP ← (GETSTREAM NIL 'OUTPUT))
     until (EOFP (JFNS INFP))
     count (BOUT OUTFP (BIN INFP))
     finally (CLOSEF (JFNS INFP))]
5←(PPT TEST) ~prints the CLISP translation.  Fn must be dwimified first.
		(TEST
		  (LAMBDA (F)  **COMMENT**
		    (PROG ((INFP (OPENSTREAM F 'INPUT))
			   (OUTFP (GETSTREAM NIL 'OUTPUT))
			   ($$VAL 0))
		      $$LP(COND
			    ((EOFP (JFNS INFP))
			      (GO $$OUT)))
			  (AND (BOUT OUTFP (BIN INFP))
			       (SETQ $$VAL (ADD1 $$VAL)))
		      $$ITERATE
			  (GO $$LP)
		      $$OUT
			  (CLOSEF (JFNS INFP))
			  (RETURN $$VAL))))
-------

∂29-Dec-83  2157	@SUMEX-AIM.ARPA:JONL.PA@PARC-MAXC.ARPA 	Re: Why so slow???  
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 29 Dec 83  21:57:46 PST
Received: from PARC-MAXC.ARPA by SUMEX-AIM.ARPA with TCP; Thu 29 Dec 83 21:50:57-PST
Date: 29 DEC 83 21:36 PST
From: JONL.PA@PARC-MAXC.ARPA
Subject: Re: Why so slow???
To: "Brand Hal"@LLL-MFE.ARPA, vanBuer%USC-ECL@sri-nic.ARPA
cc: Dolphin-Users@Sumex.ARPA

The function that I think Darrel meant to say was OPENSTREAM, rather than
GETSTREAM; the latter may be viewed as a "coercion" function, but "coercion"
never means to open a file not already open.

The problem of "coercion" is that too often users fall into the trap that
Hal Brand did -- namely READC, BIN, PRINT, BOUT etc are all willing to
try to coerce a partialy specified file name into a fully-qualified file
name, *** and then try GETSTREAM on it.  So if you open a file with name
FOO, you may notice that OPENFILE returns something like {PHYLUM}<JONL>FOO.BAR;3
How did it manage to "flesh out" the FOO into the full name?  By a process
that evidently takes 2.5 seconds in some cases (e.g., it may have to enumerate
all the files on a given directory of a file server, looking for matches, and
looking for the "right" version number).

Clearly, that coercion isn't something you want done on each character inputted
or outputted.  One should always (well, almost) save the output of OPENFILE
and/or OPENSTREAM and use it as the "file" or "stream" argument in subsequent
operations.

∂27-Dec-83  1659	@SUMEX-AIM.ARPA:BrandHal@LLL-MFE.ARPA 	Why so slow???  
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 27 Dec 83  16:58:49 PST
Received: from LLL-MFE by SUMEX-AIM.ARPA with TCP; Tue 27 Dec 83 16:25:10-PST
Date: Tue, 27 Dec 83 16:12 PST
From: "Brand Hal"@LLL-MFE.ARPA
Subject: Why so slow???
To: DOLPHIN-USERS@SUMEX-AIM.ARPA

   We recently got a Dolphin - hurrah!   Having an interest in InterLisp and
the Dolphin, I decided to try using the machine.   My first problem was to choose
a problem.   I did a nice recursive FACT.   Then, I decided to try some I/O.
I chose to write a LISP program to count the number of characters in a file.
The code follows:
	(LAMBDA (F)
		(PROG (C (N 0))
			(OPENFILE F (QUOTE INPUT))
		   LOOP
			(COND
				((READP F)
				 (SETQ C (READC F))
				 (SETQ N (ADD1 N))
				 (PRIN1 C))
				(T (CLOSEF F)
				   (RETURN N)))
			(GO LOOP)))
The program was compiled (for speed???) and ran at the rate of 2.5 seconds
per character!!!!!!   Does anybody know why it's so very very very SLOW????
Also, if any InterLisp programmers have any comments about this code, I would
like to hear the criticism - must learn, and advice from experts is appreciated.
						Hal Brand
						BRAND@LLL-MFE.ARPA

∂27-Dec-83  1707	SCHMIDT@SUMEX-AIM.ARPA 	Re: Why so slow???   
Received: from SUMEX-AIM by SU-AI with TCP/SMTP; 27 Dec 83  17:07:27 PST
Date: Tue 27 Dec 83 16:49:59-PST
From: Christopher Schmidt <SCHMIDT@SUMEX-AIM.ARPA>
Subject: Re: Why so slow???
To: "Brand Hal"@LLL-MFE.ARPA
cc: DOLPHIN-USERS@SUMEX-AIM.ARPA
In-Reply-To: Message from ""Brand Hal"@LLL-MFE.ARPA" of Tue 27 Dec 83 16:26:25-PST

	The problem with your byte-counting function is that you don't save
the "file pointer" that OPENFILE returns and pass it to the reading functions.
Instead you're passing the file name, which has to be dwimified into the file
pointer for each byte!
	Also (unrelated to speed), you could have made this code prettier with
CLISP.  Eg.,   (bind    (FP ← (OPENFILE F 'INPUT))
		while   (READP FP)
		count   (PRIN1 (READC FP))
		finally (CLOSEF FP))
--Christopher
-------

∂16-Dec-83  1457	PW  	LispM evaluation issues  
The file 3600.EV[CL,PW] contains an expanded outline of information for
performance evaluation on the 3600 for your perusal.  Comments are
welcome, of course.  I actually had to look at the microcode for some
of that stuff.

There is obviously a great deal of overlap between the CADR and the 3600
descriptions.  The easiest way to deal with this is to discuss one, then
look at the differences with the other.  Describing the CADR first is
historically correct, but the 3600 is currently more of a product of interest.
We may also need to factor in the  LMI Lambda.  What sounds best to you?

Garbage collection is also a problem.  We should probably discuss this.

∂21-Dec-83  2321	GSB@MIT-ML 	benchmarking 
Received: from MIT-ML by SU-AI with TCP/SMTP; 21 Dec 83  23:21:27 PST
Date: 22 December 1983 02:24 EST
From: Glenn S. Burke <GSB @ MIT-ML>
Subject: benchmarking
To: rpg @ SU-AI

I'm going to be coming out with a new release of nil in january.
Are you interested in be doing the benchmarks over again in it?
There will be some interesting differences, i think.  For instance:
there are now four formats of floating-point numbers.  The result?
Generic arithmetic, in general, slowed down a bit, because to keep
the code from blowing up it has to be a little more modular and subroutinized.
But, the compiler has been tweaked all over the place.  Also, there is
something i don't think i ever mentioned to you before:  the nil "minisubrs",
those JSB routines (which do things like error-checking CAR and CDR, and
which also do lots of the simpler generic arithmetic functions) have
METERING CODE in them!!!  Although just a couple instructions, they are
"big" instructions with moby literals and several memory references.

For the record, the time on the generic-arithmetic TAK which i gave
you before was about 8.0 seconds.  I more recently got a time of about
9.0, which i presume was that arithmetic slowdown (probably mainly in
the < rather than 1- function).  HOWEVER, with the compiler tweaking
(which does some fairly gross and dumb stuff to function calls, but
very frequently results in an improvement), and if i experimentally
remove that metering code, the time goes down to about 5.3 seconds!
There might even be some instruction-cacheing effects in here, because
the code is smaller than it used to be, too;  i wouldn't know.

Aside from a frequent improvement in function-call setup, there are
various other minor optimizations in the compiler, so when it does
things like car, cdr, svref, etc., it is less likely to generate
extraneous MOVes through a register.  I will probably end up running
the benchmarks even if you aren't interested, if i have time, to see
what kind of improvements i've obtained over the past 6 months.

p.s.  The result of all of this is that the compiler is slower than
ever.  It's time for a redesigned compiler;  when that's done, then
we will REALLY see some benchmarking.  Right now, my best-case
estimate for having both the new compiler and a working
garbage-collector is a year from now.

p.p.s.  LCS today officially approved increased NIL support for the
coming year.

NIL
Yes. I will want to see the newest times. I'll try to incorporate new
data up until press time. Thanks for your efforts.
			-rpg-
∂29-Jan-84  1526	WVANROGGEN@DEC-MARLBORO.ARPA 	more on Vax Lisp timings 
Received: from DEC-MARLBORO by SU-AI with TCP/SMTP; 29 Jan 84  15:26:48 PST
Date: Sun 29 Jan 84 18:22:23-EST
From: "Walter van Roggen" <WVANROGGEN@DEC-MARLBORO.ARPA>
Subject: more on Vax Lisp timings
To: rpg@SU-AI.ARPA, pw@SU-AI.ARPA

I've spent some time trying the benchmarks again. One difference I've
noticed this time is that there are more frequent GC's. The reason is
that the lisp I tried originally didn't have an editor in it. What
follows are times with the editor present and after having done a COMPILE-FILE
in the lisp (and then loading the benchmark, of course).

All times are CPU seconds, with the second term showing the probability
of having a GC and the length of the GC if it did take place. I actually
ran most tests 10 times (some 5 or less, if they were long), all
consecutively. I ignore the probability of GC if it seems small.

	cpu + probability x gc

BOYER	55 + 1.3 x 22

BROWSE	59 + 0.3 x 24

DERIV	14 + 1.5 x 15

DESTRU	7.7 + 0.2 x 16

DIV2a	5.0 + 0.6 x 16
DIV2b	8.8 + 0.6 x 16

FRPOLY	0.03
	0.04
	0.03

	0.24
	0.40
	0.31

	2.3
	6.6 + 0.1 x 17
	3.2

	15
	38 + 1.0 x 15
	20 + 0.5 x 15

FPRINT	21
FREAD	15
TPRINT	28

CTAK	20 + 0.2 x 16
STAK	9.9
TAKL	23
TAKR	3.6
TAK	2.27
TRTAK	1.98

Of course, these figures are greatly influenced by the size of dynamic
memory. Without the editor, I got better times (also with fewer tries),
since there were much fewer GC's.

In comparing with your figures, we clearly need work on our I/O. The
reader is being rewritten with an observed improvement of 1/3. Special
variable references will be much faster when I have had a chance. GC's
will be faster when we move a lot of stuff into read-only space and
into static space. (System code vectors are in read-only already, and
system symbols in static.) Catch/throw needs to be redesigned. And
function calling itself will become slightly faster. Eventually we'll
get around to compiler optimizations like register allocation and
tail recursion removal. Oh, yes, array/vector stuff needs to be
optimized. But overall, I think we're doing OK so far.

In your list of times, what does "SAIL" refer to?

Could you send me a draft of your paper?

			---Walter
-------

SAIL is the Stanford AI Lab KL-10 running MacLisp. SAIL does not
page (nor does it swap often because of the large memory).

All of the people who have helped out will receive the paper.
			-rpg-
∂08-Feb-84  0217	JonL.pa@PARC-MAXC.ARPA 	Fourth Attempt! 
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 8 Feb 84  02:17:18 PST
Date: 8 Feb 84 02:16 PST
From: JonL.pa@PARC-MAXC.ARPA
Subject: Fourth Attempt!
To: RPG@SU-AI.ARPA

 7-FEB-84 19:28:51-PST,2131;000000000001
Date:  7 FEB 84 19:28 PST
From: JONL.PA
Subject: 3rd attempt!
To:   RPG@SU-AI
cc:   JonL, RPG@SU-SCORE

Date:  4 FEB 84 01:33 PST
From: JONL.PA
Subject: Revised DLion Benchmark Timings
To:   RPG@SU-AI
cc:   JonL

It took most of this week to find and fix the microcode bug; it wasn't
that CONS wasn't in ucode, but that after doing the "cons", it would then
punt to Lisp code to do some statistics keeping -- a "punting" which normally
occurs only every 10000 calls.  It was a reversed conditional test (which,
sigh, I'm finding out is easy to overlook in ucode), whose only effect was to
call the stats keeper more often than necessary.

I re-ran any "suspicious" timing, and here are the results

BenchMark     Total   CPU    GC    SWAP   Comments
------------------------------------------------------
DERIV          76.2   23.9   52.2   --
DDERIV         92.8   33.3   59.5   --    Average of 4 trials
DIV2 (iter)    32.3   23.8    8.5   --    Average of 2 trials
DIV2 (rec)     33.3   24.8    8.5   --    Average of 4 trials
DESTRUCTIVE    26.85  17.6    9.27  --    Average of 4 trials
BROWSE        300.0   174.0 126.0   .256
BOYER         119.0    74.6  44.4   --    Average of 2 trials
FPRINT         13.7    13.3    .4   --    Average of 2 trials
FREAD           8.15    8.0    .13  --    Average of 4 trials


The latter two, oddly enough, do have a measurable amount of CONS time,
and in fact it seems to have made a 28% difference in FPRINT (I'm not
sure why the difference is so small for FREAD -- only 11%).  

By the way, I conclude from comparing the DIV2 recursive results from last 
week and this evening, that we had ** reversed ** the reported CPU and GC 
times in our recording of them; better to find this out now rather than later!


A new SYSOUT has been loaded up this evening, and likely that is the one
which will be "released" early next week.


The Xerox LispCore group certainly owes you a "Thanks" -- without the
vigilance of your benchmarking activity, this bug might have gone unnoticed
for years [as a similar one in the Dorado in fact did].



∂26-Jan-84  2117	JONL.PA@PARC-MAXC.ARPA 	fyi   
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 26 Jan 84  21:17:28 PST
Date: 26 JAN 84 21:08 PST
From: JONL.PA@PARC-MAXC.ARPA
Subject: fyi
To: RPG@SU-AI.ARPA

Date: 26 Jan 84 12:32 PST
From: masinter.pa
Subject: [Dave Dyer <DDYER@USC-ISIB>: [Dave Dyer       <DDYER@USC-ISIB>:
Lesson 2,342,123 in benchmarking lisps]
To: LISPCORE↑

Did I forward this message from Dyer before? 3600 slower than Dolphin?

     ----- Forwarded Messages -----

Received: from USC-ISIF.ARPA by PARC-MAXC.ARPA; 25 JAN 84 16:52:24 PST
Date: 25 Jan 84 16:51 PST
Subject: [Dave Dyer <DDYER@USC-ISIB>: [Dave Dyer       <DDYER@USC-ISIB>:
Lesson 2,342,123 in benchmarking lisps]]
From: Tom Lipkis <LIPKIS@USC-ISIF.ARPA>
To: masinter.pa

Here it is.  The 2.8 sec for the 3600 isn't really comparable to the other
times, and those are all hash arrays instead of alists.  With alists all
the other numbers went down too.  Much has been done since then, and its
about twice as fast.  I'll be moving it to the 3600 again in a few weeks,
but if you'd like an up to date comparison before then let me know and I
can do it sometime this week.

Tom
                ---------------

Return-Path: <DDYER@USC-ISIB>
Received: FROM USC-ISIB.ARPA BY USC-ISIF.ARPA WITH TCP ; 10 Nov 83
23:18:44 PST
Date: 10 Nov 1983 2317-PST
Subject: [Dave Dyer       <DDYER@USC-ISIB>: Lesson 2,342,123 in
benchmarking lisps]
From: Dave Dyer <DDYER@USC-ISIB>
To: masinter@PARC-MAXC
cc: lipkis


 I hope you find this amusing.  I feel Symbolics owes you at least
one good laugh.

                ---------------

Date: 10 Nov 1983 2305-PST
Subject: Lesson 2,342,123 in benchmarking lisps
From: Dave Dyer       <DDYER@USC-ISIB>
To: ddyer@SCRC-TENEX
cc: moon@SCRC-TENEX, dlw@SCRC-TENEX, mlb@SCRC-TENEX, hic@SCRC-TENEX,
    hgb@SCRC-TENEX


 This amusing note is to add a bit of resigned cynicism to those
who like to quote speeds of machines, ours, theirs, or otherwise.

 A group at ISI and BBN has been writing a replacement for KL-ONE
which is called NIKL.  Nikl runs like a greased pig compared with
KL-ONE, and is showing signs of health, so as is the custom,
benchmarking games are being played. The score on a standardized
short test was;

  	Vax 750    2.54  sec.
	Dolphin    2.47	 sec.
	Vax 780    1.92	 sec.
	Jericho    1.8	 sec.
	Dandelion  1.3   sec.
	Dorado	   0.3   sec. (estimated, test hasn't been run)

  Now, we said, wouldn't it be amusing to pass this beast through
the converter and try it on the 3600 --- a nice test of the
compatibility
package, and maybe provide some competition for the dorado by running on
another fast machine.  Well, it passed through easily and runs on
the lisp machine. It's time?

	3600	   3.6   sec.

 Gasp!   So, clearly something is wrong, right?  A bit of investigation
shows that the heaviest function is MAPHASH, and the lispm seems to
prefer creating hash tables of size 88 (requested size is 10).  Never
fear, a magic switch changes the representation to ALIST's and the time
is now...
	3600	  2.8	sec.

 Still trailing the pack.  Why? Who knows.  Maybe CONS is still
an order of magnitude slower than it should be.


 The lesson is obvious.  Either 3600's are the slowest lisp
machine ever built, or benchmark timings don't mean a damn thing.
-------
-------
-------

     ----- End of Forwarded Messages ----
-

∂27-Jan-84  1637	WVANROGGEN@DEC-MARLBORO.ARPA 	Common Lisp timings 
Received: from DEC-MARLBORO by SU-AI with TCP/SMTP; 27 Jan 84  16:36:55 PST
Date: Fri 27 Jan 84 19:32:27-EST
From: "Walter van Roggen" <WVANROGGEN@DEC-MARLBORO.ARPA>
Subject: Common Lisp timings
To: rpg@SU-AI.ARPA, pw@SU-AI.ARPA

Hmmm. I wonder if there isn't some awful bug in our Lisp that either
causes incorrect code to be compiled or bad timings to be given. I think
our times are much too fast compared to Franz. I should run them again
to make sure. Is there any way I can find out if it's really running
correctly? I did notice FRPOLY returning huge lists of polynomials.
The timings for FRPOLY were totals of the three for the four lines.
TAK ran in 2.26, with tail recursion removed took 1.96. Although I
intend to implement tail recursion removal, I haven't done so yet,
so the 1.96 time for TAK is not true.

I tried PUZZLE and found that it seemed to run forever. (This is
another reason I'm thinking there must be an awful bug. But this is
also more likely, since it involves arrays, whereas the others
just dealt with lists and symbols and fixnums.) What can I do to
determine that progress is always being made?

			---Walter
-------

∂27-Jan-84  1639	WVANROGGEN@DEC-MARLBORO.ARPA 	DERIV time
Received: from DEC-MARLBORO by SU-AI with TCP/SMTP; 27 Jan 84  16:39:33 PST
Date: Fri 27 Jan 84 19:40:07-EST
From: "Walter van Roggen" <WVANROGGEN@DEC-MARLBORO.ARPA>
Subject: DERIV time
To: rpg@SU-AI.ARPA, pw@SU-AI.ARPA

not 40.0, but 27 including one GC time, which was about 13.
Due to the inaccuracies of the timer, I've generally used only
two digits in my numbers.
			---Walter
-------

∂29-Jan-84  1526	WVANROGGEN@DEC-MARLBORO.ARPA 	more on Vax Lisp timings 
Received: from DEC-MARLBORO by SU-AI with TCP/SMTP; 29 Jan 84  15:26:48 PST
Date: Sun 29 Jan 84 18:22:23-EST
From: "Walter van Roggen" <WVANROGGEN@DEC-MARLBORO.ARPA>
Subject: more on Vax Lisp timings
To: rpg@SU-AI.ARPA, pw@SU-AI.ARPA

I've spent some time trying the benchmarks again. One difference I've
noticed this time is that there are more frequent GC's. The reason is
that the lisp I tried originally didn't have an editor in it. What
follows are times with the editor present and after having done a COMPILE-FILE
in the lisp (and then loading the benchmark, of course).

All times are CPU seconds, with the second term showing the probability
of having a GC and the length of the GC if it did take place. I actually
ran most tests 10 times (some 5 or less, if they were long), all
consecutively. I ignore the probability of GC if it seems small.

BOYER	55 + 1.3 x 22

BROWSE	59 + 0.3 x 24

DERIV	14 + 1.5 x 15

DESTRU	7.7 + 0.2 x 16

DIV2a	5.0 + 0.6 x 16
DIV2b	8.8 + 0.6 x 16

FRPOLY	0.03
	0.04
	0.03

	0.24
	0.40
	0.31

	2.3
	6.6 + 0.1 x 17
	3.2

	15
	38 + 1.0 x 15
	20 + 0.5 x 15

FPRINT	21
FREAD	15
TPRINT	28

CTAK	20 + 0.2 x 16
STAK	9.9
TAKL	23
TAKR	3.6
TAK	2.27
TRTAK	1.98

Of course, these figures are greatly influenced by the size of dynamic
memory. Without the editor, I got better times (also with fewer tries),
since there were much fewer GC's.

In comparing with your figures, we clearly need work on our I/O. The
reader is being rewritten with an observed improvement of 1/3. Special
variable references will be much faster when I have had a chance. GC's
will be faster when we move a lot of stuff into read-only space and
into static space. (System code vectors are in read-only already, and
system symbols in static.) Catch/throw needs to be redesigned. And
function calling itself will become slightly faster. Eventually we'll
get around to compiler optimizations like register allocation and
tail recursion removal. Oh, yes, array/vector stuff needs to be
optimized. But overall, I think we're doing OK so far.

In your list of times, what does "SAIL" refer to?

Could you send me a draft of your paper?

			---Walter
-------

SAIL is the Stanford AI Lab KL-10 running MacLisp. SAIL does not
page (nor does it swap often because of the large memory).

All of the people who have helped out will receive the paper.
			-rpg-
∂04-Feb-84  0953	fateman%ucbdali@Berkeley 	favor    
Received: from UCB-VAX by SU-AI with TCP/SMTP; 4 Feb 84  09:53:42 PST
Received: from ucbdali.ARPA by UCB-VAX.ARPA (4.22/4.21)
	id AA20874; Sat, 4 Feb 84 09:46:51 pst
Received: by ucbdali.ARPA (4.22/4.22)
	id AA25083; Sat, 4 Feb 84 09:53:21 pst
Date: Sat, 4 Feb 84 09:53:21 pst
From: fateman%ucbdali@Berkeley (Richard Fateman)
Message-Id: <8402041753.AA25083@ucbdali.ARPA>
To: rpg@su-ai
Subject: favor

Can you give me (even partial) info on this:
if we are offered an LMI Lambda machine or a Symbolics 3600 at the same price
(effectively $0, to me), is there a rational basis for choosing one
over the other on speed, reliability, software?

Also, do you have any evidence that T runs programs faster than Franz,
PSL, Ylisp or ...?

1. LMI has not chosen to run my benchmarks yet, mainly because the Lambda
machine runs at 50% clock speed now. I suspect that the machines will
be of comparable speed, so I wouldn't choose based on that.
The 3600's we have are reasonably reliable, but they break down more often
than, say, 780's. The 3600 has a lot more software and seems to have made
a larger tangible commitment to Common Lisp.

The thing that would decide it for me is the relative sizes of the operations.
Symbolics is large and has the good graces of ARPA right now. LMI will depend
on TI's good will to stay alive.

2. I have no evidence one way or the other.
∂06-Feb-84  1214	ROD   	Re:  T and franz  
 ∂06-Feb-84  0918	NET-ORIGIN@MIT-MC 	Re:  T and franz
Received: from MIT-MC by SU-AI with TCP/SMTP; 6 Feb 84  09:18:35 PST
Received: from MIT-MC by MIT-OZ via Chaosnet; 6 Feb 84 12:12-EST
Received: from ucbkim.ARPA by UCB-VAX.ARPA (4.22/4.21)
	id AA24465; Mon, 6 Feb 84 08:41:00 pst
Received: from UCB-VAX.ARPA (ucbvax.ARPA) by ucbkim.ARPA (4.16/4.22)
	id AA21857; Mon, 6 Feb 84 08:42:25 pst
Received: from ucbdali.ARPA by UCB-VAX.ARPA (4.22/4.21)
	id AA24376; Mon, 6 Feb 84 08:35:43 pst
Received: by ucbdali.ARPA (4.22/4.22)
	id AA10308; Mon, 6 Feb 84 08:42:06 pst
Date: Mon, 6 Feb 84 08:42:06 pst
From: fateman%ucbdali@Berkeley (Richard Fateman)
Message-Id: <8402061642.AA10308@ucbdali.ARPA>
To: Olin.Shivers@CMU-CS-H
Subject: Re:  T and franz
Cc: franz-friends@Berkeley

	Subject: T and franz
	
	Hmm. It strikes me that there have been a good many wild claims made on
	both side of this argument.  Maybe I can clear a few up.
Ditto, otherwise I'd just keep my mouth shut...
	
	The professor that announced we shouldn't use franz was Geoff Hinton.
Of course Geoff might have provided evidence of slowness.  You still haven't.	
	 There has only been one paper published
	on T ("T:  a Dialect of Lisp" in the Conference Record of the '82 
	Symposium on
	LISP and Functional Programming, August 1982).  I just read through 
	it again; it
	makes no claims at all about speedup factors over franz.
Having read the first version as a member of the program committee, and
hence a referee, I asked that the statement be supported or removed.
It was removed.
	
	
	The only fast lisps I know of are Maclisp, Zetalisp, and T.
Don't you yet understand that your OPINION is not being questioned.  I
know what you THINK is true.  You don't want to be confused by the facts.
I truly do not know how fast T is in comparison to Franz, but I can tell
you that there are benchmarks illustrating that Franz on a vax 780 is faster
than a Symbolics 3600, for some purposes.  Perhaps RPG@SU-AI will have
some information, someday.
	
	Too bad Barry had such a rough time getting T up at Harvard. I had no problems
	here at CMU.
Are you using 4.2BSD UNIX on a VAX?  If so I would think you'd do us all
a favor and make it available.  Because we haven't been able to get it to
work here.

	 He's designed a language so clean, elegant and powerful it brings tears to your eyes.
Yes, but why call it Lisp?  How about ONION  (Onion's Not it's Original Name ?)
(Sorry Jon..)

I look forward to a working 4.2BSD T (at a price I can afford), and some
benchmark data.

	

∂08-Feb-84  0217	JonL.pa@PARC-MAXC.ARPA 	Fourth Attempt! 
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 8 Feb 84  02:17:18 PST
Date: 8 Feb 84 02:16 PST
From: JonL.pa@PARC-MAXC.ARPA
Subject: Fourth Attempt!
To: RPG@SU-AI.ARPA

 7-FEB-84 19:28:51-PST,2131;000000000001
Date:  7 FEB 84 19:28 PST
From: JONL.PA
Subject: 3rd attempt!
To:   RPG@SU-AI
cc:   JonL, RPG@SU-SCORE

Date:  4 FEB 84 01:33 PST
From: JONL.PA
Subject: Revised DLion Benchmark Timings
To:   RPG@SU-AI
cc:   JonL

It took most of this week to find and fix the microcode bug; it wasn't
that CONS wasn't in ucode, but that after doing the "cons", it would then
punt to Lisp code to do some statistics keeping -- a "punting" which normally
occurs only every 10000 calls.  It was a reversed conditional test (which,
sigh, I'm finding out is easy to overlook in ucode), whose only effect was to
call the stats keeper more often than necessary.

I re-ran any "suspicious" timing, and here are the results

BenchMark     Total   CPU    GC    SWAP   Comments
------------------------------------------------------
DERIV          76.2   23.9   52.2   --
DDERIV         92.8   33.3   59.5   --    Average of 4 trials
DIV2 (iter)    32.3   23.8    8.5   --    Average of 2 trials
DIV2 (rec)     33.3   24.8    8.5   --    Average of 4 trials
DESTRUCTIVE    26.85  17.6    9.27  --    Average of 4 trials
BROWSE        300.0   174.0 126.0   .256
BOYER         119.0    74.6  44.4   --    Average of 2 trials
FPRINT         13.7    13.3    .4   --    Average of 2 trials
FREAD           8.15    8.0    .13  --    Average of 4 trials


The latter two, oddly enough, do have a measurable amount of CONS time,
and in fact it seems to have made a 28% difference in FPRINT (I'm not
sure why the difference is so small for FREAD -- only 11%).  

By the way, I conclude from comparing the DIV2 recursive results from last 
week and this evening, that we had ** reversed ** the reported CPU and GC 
times in our recording of them; better to find this out now rather than later!


A new SYSOUT has been loaded up this evening, and likely that is the one
which will be "released" early next week.


The Xerox LispCore group certainly owes you a "Thanks" -- without the
vigilance of your benchmarking activity, this bug might have gone unnoticed
for years [as a similar one in the Dorado in fact did].



∂09-Feb-84  1325	KESSLER@UTAH-20.ARPA 	Re: Benchmarks    
Received: from UTAH-20 by SU-AI with TCP/SMTP; 9 Feb 84  13:24:55 PST
Date: Thu 9 Feb 84 14:22:52-MST
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Re: Benchmarks    
To: RPG@SU-AI.ARPA
In-Reply-To: Message from "Dick Gabriel <RPG@SU-AI>" of Thu 9 Feb 84 12:59:00-MST

Sure, I'm always interested in doing some timing comparisons.  With our new
effort in a better compiler, it would be good to get a good suite of tests
to be able to run and compare with previous PSL's and other Lisp's as well
(As I'm sure you will see when you read our Lisp Conference paper, we are
still in the implementation/design phase and don't yet have some good hard
numbers...  Hopefully by the time the conference rolls around we will be
able to report some favorable comparisons using these tests).  Tryg has
said that the 3081 numbers are very impressive, so maybe he will also be
willing to convert some of the code.

Please send me the tests, along with whatever has already been translated.
I can run them on our 20, Vax 750 and 780 and Apollo (I think Martin will
run the ones on the 9836).

Bob.
-------

∂10-Feb-84  0032	JONL.PA@PARC-MAXC.ARPA 	Re: Results
Received: from PARC-MAXC by SU-AI with TCP/SMTP; 10 Feb 84  00:32:31 PST
Date: 10 FEB 84 00:30 PST
From: JONL.PA@PARC-MAXC.ARPA
Subject: Re: Results
To: RPG@SAIL.ARPA, jonl.PA@SAIL.ARPA
cc: JONL.PA@PARC-MAXC.ARPA

In response to the message sent  09 Feb 84 11:53 PST from RPG@SU-AI.ARPA

Right, no room for Floating-Point ucode in 4K control-store DLion.
No room for BIN either (the Interlisp equivalent of TYI), which makes
quite a difference in the FREAD benchmark -- BIN is mostly just doing
the equivalent of an ILDB on the PDP10, but the macrocode for it
seems to be the bottleneck in FREAD.

Floating-point ucode is in the 12K control store, but not quite ready
yet.  Purcell said something about Stanfrob people not even getting to
see much of anything because they hadn't converted their code to
withstand the changes of this upcoming release; also, I don't think he's
got enough of it debugged to make any real runs on right now.  But the
Schlumberger people are "waiting with bated breath" for it, since they
have a lot of numeric-intensive application programs.

∂21-Feb-84  1341	RBATES@USC-ISIB 	Re: Benchmarks    
Received: from USC-ISIB by SU-AI with TCP/SMTP; 21 Feb 84  13:41:09 PST
Date: 21 Feb 1984 13:39-PST
Sender: RBATES@USC-ISIB
Subject: Re: Benchmarks    
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Cc: JONL.PA@PARC-MAXC, feber@USC-ISIB
Message-ID: <[USC-ISIB]21-Feb-84 13:39:49.RBATES>
In-Reply-To: The message of 14 Feb 84  1421 PST from Dick Gabriel <RPG@SU-AI>

Here are the results of the Benchmarks for ISI-Interlisp.  The only
one I didn't do was TPRINT.  It doesn't make any sense in ISI-Interlisp
or Interlisp-10 since both have TTY interface to the user.  

	
BOYER:
←(TIME (SETUP]
224 conses
0.096 seconds
T
←(TIME (TEST]
226500 conses
64.928 seconds
(T 64928 0)

BROWSE:
←(TIME (BROWSE]
489007 conses
155.232 seconds
NIL

CTAK:
←(TIME (TAK 18 12 6]
0 conses
35.456 seconds
7

DDERIV:
←(TIME (RUN]
260032 conses
38.272 seconds
NIL

DERIV:
←(TIME (RUN]
245030 conses
31.824 seconds
NIL

DESTRUCTIVE:
←(TIME (DESTRUCTIVE 600 50]
43110 conses
18.304 seconds
((1 1 2) (1 1 1) (1 1 1 2) (1 1 1 1) (1 1 1 1 2) (1 1 1 1 2) (1 1 1 1 2) (1 1 1 
1 2) (1 1 1 1 2) (1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 3))

DIV2:
←(TIME (FTEST1]
0 conses
0.064 seconds
NIL
←(TIME (TEST1]
0 conses
0.064 seconds
NIL
←(TIME (TEST2]
0 conses
0.064 seconds

FPRINT:
←(TIME (FPRINT]
222 conses
0.752 seconds
/lisp/rbates/lisp/dick/FPRINT.TST;1

FREAD:
/lisp/rbates/lisp/dick/FREAD.V;1
←(TIME (FREAD]
45 conses
0.128 seconds
/lisp/rbates/lisp/dick/FPRINT.TST;1

STAK:
/lisp/rbates/lisp/dick/STAK.V;1
←(TIME (TAK 18 12 6]
0 conses
10.288 seconds
7

TAK:
←(TIME (TAK 18 12 6]
0 conses
3.184 seconds
7
←(TIME (TAK 1018 1012 1006]
0 conses
3.168 seconds
1007

TAKL:
←(TIME (TAKL 18L 12L 6L ]
0 conses
10.016 seconds
(7 6 5 4 3 2 1)

TAKR:
←(TIME (TAKR]
0 conses
5.504 seconds
7

FFT:
←(TIME (FFT RE IM))
4 conses
27.488 seconds
T

FFFT:
←(TIME (FFFT RE IM))
3 conses
26.192 seconds
T

SFFT:
←(TIME (SFFT ARE AIM]
4 conses
31.744 seconds
T

TRAVERSE:
←(TIMIT]
36857 conses
29.024 seconds
31.567 seconds, real TIME
0 conses
406.752 seconds
432.667 seconds, real TIME
100

PUZZLE:
←(TIME (START))

Success in 2005 trials.
0 conses
123.088 seconds
NIL



/Ray

∂21-Feb-84  1712	RBATES@USC-ISIB 	Re: Results  
Received: from USC-ISIB by SU-AI with TCP/SMTP; 21 Feb 84  17:12:00 PST
Date: 21 Feb 1984 17:08-PST
Sender: RBATES@USC-ISIB
Subject: Re: Results  
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Cc: feber@USC-ISIB, mcgreal@ADA-VAX
Message-ID: <[USC-ISIB]21-Feb-84 17:08:35.RBATES>
In-Reply-To: The message of 21 Feb 84  1534 PST from Dick Gabriel <RPG@SU-AI>

Here are the missing benchmarks:
	
TRIANG:
←(TIME (TEST]
11635 conses
1122.4 seconds
T

DIV2:
←(TIME (FTEST1 L]
120015 conses
9.808 seconds
(NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL
 NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL
 NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL
 NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL
 NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL
)
←(TIME (TEST1 L]
120014 conses
9.472 seconds
NIL
15←(TIME (TEST2 L]
120015 conses
14.544 seconds
NIL

/Ray

∂21-Feb-84  2152	RBATES@USC-ISIB 	Re: By the way    
Received: from USC-ISIB by SU-AI with TCP/SMTP; 21 Feb 84  21:52:43 PST
Date: 21 Feb 1984 21:51-PST
Sender: RBATES@USC-ISIB
Subject: Re: By the way    
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Cc: jonl.pa@PARC
Message-ID: <[USC-ISIB]21-Feb-84 21:51:11.RBATES>
In-Reply-To: The message of 21 Feb 84  1723 PST from Dick Gabriel <RPG@SU-AI>

The version of TPRINT you gave me has calls to CREATEW, DSPSCROLL and
DSPYPOSITION.  These functions are not in Interlisp-10 or ISI-Interlisp.
TPRINT also create a record/datatype called REGION which is not
in Interlisp-10 or ISI-Interlisp.  What to do you want me to
do with TPRINT?

/Ray

Oh. Just time (PRINT TESTPATTERN) in TPRINT and flush the window stuff.
∂15-Feb-84  2218	ROD  	lisp timings in AI list.
To:   roach@RUTGERS
CC:   laws@SRI-AI, RPG@SU-AI    
RPG is out of town so I thought I'd respond to the publication of his
numbers in AI list. He (see Gabriel and Masinter, 1982 ACM Lisp symp)
has strong reasons not to publish single number benchmarks as were
posted in AI list. He has spent the last two years doing extensive
benchmarkings of all those machines/implementations and more. His
report is about to be released. I suggest you ask him for a copy, before
half-assedly trying to collect numbers just for TAK.

  Rod Brooks

∂16-Feb-84  0124	Rees@YALE 	Quick hack TAK (benchmarks will be benchmarks)   
Received: from YALE by SU-AI with TCP/SMTP; 16 Feb 84  01:23:57 PST
Received: by YALE-BULLDOG via CHAOS; Thu, 16 Feb 84 02:40:52 EST
Received: from YALE-RING by YALE-RES via CHAOS; Thu, 16 Feb 84 02:33:55 EST
Subject: Quick hack TAK (benchmarks will be benchmarks)
Date: Thu, 16 Feb 84 02:34:15 EST
From: Jonathan Rees <Rees@YALE.ARPA>
To: T-Users@YALE.ARPA, T-Discussion@YALE.ARPA
cc: Roach@RUTGERS.ARPA, RPG@SU-AI.ARPA, GJC@MIT-MC.ARPA, Hedrick@YALE.ARPA

Many of you may have seen a recent message on AILIST about a certain
Lisp benchmark.  We ran it in T and got the following results.  Included
is the original message for comparison.

On Apollo in T fixnum recklssnss low   6.1   seconds compiled
On Apollo in T fixnum recklssnss high  3.5   seconds compiled
On Apollo in T fixnum block compiled   3.0   seconds compiled
On 11/750 in T fixnum recklssnss low   5.9   seconds compiled
On 11/750 in T fixnum recklssnss high  2.4   seconds compiled
On 11/750 in T fixnum block compiled   1.9   seconds compiled
On 11/780 in T fixnum recklssnss low   3.4   seconds compiled
On 11/780 in T fixnum recklssnss high  1.7   seconds compiled
On 11/780 in T fixnum block compiled   1.26  seconds compiled

"Recklssnss" refers to a run-time switch which controls consistency
checking in function calls.  Pardon the abbreviation; I wanted it
to fit the format of the chart.  "Block compiled" means that TAK was
made a local function using LABELS so that calls would be compiled
as direct jumps.

The timings were obtained by running a loop which computed
(tak 18 12 6) ten times and dividing elapsed (wall) time by ten.

The 750 was running Unix; the 780 was running VMS.

------------------------------------------

Date: 11 Feb 84 17:54:24 EST
From: John <Roach@RUTGERS.ARPA>
Subject: Timings of LISPs and Machines


I dug up these timings, they are a little bit out of date but seem a little
more informative.  They were done by Dick Gabriel at SU-AI in 1982 and passed
along by Chuck Hedrick at Rutgers.  Some of the times have been updated to
reflect current machines by myself.  These have been marked with the
date of 1984.  All machines were measured using the function -

an almost Takeuchi function as defined by John McCarthy

(defun tak (x y z)
       (cond ((not (< y x))
              z)
             (t (tak (tak (1- x) y z)
                     (tak (1- y) z x)
                     (tak (1- z) x y)))))

------------------------------------------

(tak 18. 12. 6.)

On 11/750 in Franz ordinary arith     19.9   seconds compiled
On 11/780 in Franz with (nfc)(TAKF)   15.8   seconds compiled   (GJC time)
On Rutgers-20 in Interlisp/1984       13.8   seconds compiled
On 11/780 in Franz (nfc)               8.4   seconds compiled   (KIM time)
On 11/780 in Franz (nfc)               8.35  seconds compiled   (GJC time)
On 11/780 in Franz with (ffc)(TAKF)    7.5   seconds compiled   (GJC time)
On 11/750 in PSL, generic arith        7.1   seconds compiled
On MC (KL) in MacLisp (TAKF)           5.9   seconds compiled   (GJC time)
On Dolphin in InterLisp/1984           4.81  seconds compiled
On Vax 11/780 in InterLisp (load = 0)  4.24  seconds compiled
On Foonly F2 in MacLisp                4.1   seconds compiled
On Apollo (MC68000) PASCAL             3.8   seconds            (extra waits?)
On 11/750 in Franz, Fixnum arith       3.6   seconds compiled
On MIT CADR in ZetaLisp                3.16  seconds compiled   (GJC time)
On MIT CADR in ZetaLisp                3.1   seconds compiled   (ROD time)
On MIT CADR in ZetaLisp (TAKF)         3.1   seconds compiled   (GJC time)
On Apollo (MC68000) PSL SYSLISP        2.93  seconds compiled
On 11/780 in NIL (TAKF)                2.8   seconds compiled   (GJC time)
On 11/780 in NIL                       2.7   seconds compiled   (GJC time)
On 11/750 in C                         2.4   seconds
On Rutgers-20 in Interlisp/Block/84    2.225 seconds compiled
On 11/780 in Franz (ffc)               2.13  seconds compiled   (KIM time)
On 11/780 (Diablo) in Franz (ffc)      2.1   seconds compiled   (VRP time)
On 11/780 in Franz (ffc)               2.1   seconds compiled   (GJC time)
On 68000 in C                          1.9   seconds
On Utah-20 in PSL Generic arith        1.672 seconds compiled
On Dandelion in Interlisp/1984         1.65  seconds compiled
On 11/750 in PSL INUM arith            1.4   seconds compiled
On 11/780 (Diablo) in C                1.35  seconds
On 11/780 in Franz (lfc)               1.13  seconds compiled   (KIM time)
On UTAH-20 in Lisp 1.6                 1.1   seconds compiled
On UTAH-20 in PSL Inum arith           1.077 seconds compiled
On Rutgers-20 in Elisp                 1.063 seconds compiled
On Rutgers-20 in R/UCI lisp             .969 seconds compiled
On SAIL (KL) in MacLisp                 .832 seconds compiled
On SAIL in bummed MacLisp               .795 seconds compiled
On MC (KL) in MacLisp (TAKF,dcl)        .789 seconds compiled
On 68000 in machine language            .7   seconds
On MC (KL) in MacLisp (dcl)             .677 seconds compiled
On SAIL in bummed MacLisp (dcl)         .616 seconds compiled
On SAIL (KL) in MacLisp (dcl)           .564 seconds compiled
On Dorado in InterLisp Jan 1982 (tr)    .53  seconds compiled
On UTAH-20 in SYSLISP arith             .526 seconds compiled
On SAIL in machine language             .255 seconds (wholine)
On SAIL in machine language             .184 seconds (ebox-doesn't include mem)
On SCORE (2060) in machine language     .162 seconds (ebox)
On S-1 Mark I in machine language       .114 seconds (ebox & ibox)

I would be interested if people who had these machines/languages available
could update some of the timings.  There also isn't any timings for Symbolics
or LMI.

John.

∂16-Feb-84  1042	LAWS@SRI-AI.ARPA 	Re: lisp timings in AI list.    
Received: from SRI-AI by SU-AI with TCP/SMTP; 16 Feb 84  10:40:36 PST
Date: Thu 16 Feb 84 09:25:46-PST
From: Ken Laws <Laws@SRI-AI.ARPA>
Subject: Re: lisp timings in AI list.
To: ROD@SU-AI.ARPA
cc: RPG@SU-AI.ARPA, Roach@RUTGERS.ARPA
In-Reply-To: Message from "Rod Brooks <ROD@SU-AI>" of Wed 15 Feb 84 22:18:00-PST

Thanks for cc'ing me on your message to Roach.  I'm just an innocent
bystander here, but I do accept a certain amount of responsibility for
anything I publish in AIList.  In defense of the item:  RPG's reasons
for not publishing single numbers may be absolutely correct and proper,
but that is no reason for accusing others of half-assed behavior when
they fail to hold the same beliefs.  Timings on the TAK function seem
to be better than no timings at all, as RPG must once have believed or
he would not have collected the data.  John seems to have acted in
good faith, especially as he provided the function and asked for updates
on performance obtained by others.  I see no reason not to publish such
timing statistics.  If someone else wants to publish I/O bandwidths or
other statistics, let them proceed in that manner instead of stomping
on those who provide whatever limited information they can.  RPG's
soon-to-be-released benchmarks will no doubt be helpful to many, but
unless he has copyrighted or otherwise limited distribution of his
previous work, there seems to have been no violation of ethics in citing
the results.

Let me know if you want your comments on this matter passed along to
AIList.  I assume that Dick Gabriel will soon be able to speak for
himself, and will be happy to pass along reasoned arguments of any sort.

					-- Ken Laws
-------

∂16-Feb-84  1359	ROD  	Lisp timings  
To:   roach@RUTGERS, laws@SRI-AI
CC:   RPG@SU-AI, ROD@SU-AI   
I apologize for using the word(s) "half-assed".

If you substitute "half-hearted" for them, then I think
my message still stands. RPG has done a full scale study
with 23 (I think) benchmarks which cover a wide range of
aspects of Lisp performance. TAK tests only function call,
but even its measurements are all tied up with caching
behavior (hence the TAK' benchmark in the full study). The
danger with one number results is that people say, ah Lisp
x scored 3 and Lisp y scored 4 and hence Lisp x is better
than Lisp y. They and the salesman will have a distorted view
of the whole picture.

In most scientific disciplines people don't publish other's
incomplete unpublished data without consulting with the data collector,
even if the data collector has let his colleagues have a copy--
letting a colleague look at data is not saying ``oh you can
publish this if you want''.
Usually data is published with a description of the experiment
and what was being measured.

I'm sure RPG will have something coherent to say on the matter
when he gets back. My comments are not intended for AIlist.

  Rod

∂16-Feb-84  1359	ROD  	lisp timings  
I hope my first message to Roach didn't seem out of line to you.
With you out of town, and not logging in, I thought I should put
a mild damper on him to keep things on hold until you were able to
do something. Maybe Rees' response will be the only one, but I
feared an avalanche.

∂17-Feb-84  0712	ROACH@RUTGERS.ARPA 	Re: lisp timings in AI list.  
Received: from RUTGERS by SU-AI with TCP/SMTP; 17 Feb 84  07:12:29 PST
Date: 17 Feb 84 10:13:05 EST
From: John <Roach@RUTGERS.ARPA>
Subject: Re: lisp timings in AI list.
To: ROD@SU-AI.ARPA
cc: laws@SRI-AI.ARPA, RPG@SU-AI.ARPA
In-Reply-To: Message from "Rod Brooks <ROD@SU-AI>" of 16 Feb 84 01:18:00 EST


Mr. Brooks,

I have already read the publication you referred to long ago.  I am quite
aware of the pitfalls that occur when one tries to compare two different
machines based on such a narrow minded comparison.  I did not intend for
my message to the net to be construed as anything more than it is..
namely a comparison of TAK on many machines and languages.  It seemed
relevant for the ailist as there seemed nothing forthcoming about the
machines other than subjective impressions.

I apologize to Richard Gabriel if I've made public information he considers
harmful or frivolous.  I will be more careful in the future (if at all).

As for half-assedly trying to collect numbers, I consider it an insult.

John Roach
-------

∂22-Feb-84  0705	ROACH@RUTGERS.ARPA 	Re: Benchmarks      
Received: from RUTGERS by SU-AI with TCP/SMTP; 22 Feb 84  07:04:57 PST
Date: 22 Feb 84 10:05:00 EST
From: John <Roach@RUTGERS.ARPA>
Subject: Re: Benchmarks    
To: RPG@SU-AI.ARPA
In-Reply-To: Message from "Dick Gabriel <RPG@SU-AI>" of 21 Feb 84 20:21:00 EST


Dick,

I'm sorry to have caused a stir that I didn't mean to occur.  I just thought
it would be useful to provide some comparison between lisps and machines.
I thought it was so obvious what tak' did and DIDN'T do that it never
occurred to me to put a disclaimer in the notice.

I'm sorry if I provided information that you preferred wasn't made public.
I had asked Chuck Hedrick and he thought it was okay, I never thought that
the timings weren't public knowledge anyway.

Sorry,

John.

P.S.  Would the Dandy Tiger happen to be in your soon to be released report?
-------

Varia
The times you quoted are public, and I hope my message did not sound upset.
Single-benchmark comparisons irk some people, especially when they show
poorly, and more so when the benchmark measures just the thing they
thought could be sacrificed in the implementation. 

Yes, the so-called wide-body Dandelion times will appear. Xerox has not tuned
it up yet, so they don't want to release numbers yet.
			-rpg-
∂23-Feb-84  1006	RBATES@USC-ISIB 	Re: By the way    
Received: from USC-ISIB by SU-AI with TCP/SMTP; 23 Feb 84  10:06:42 PST
Date: 23 Feb 1984 09:58-PST
Sender: RBATES@USC-ISIB
Subject: Re: By the way    
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Message-ID: <[USC-ISIB]23-Feb-84 09:58:47.RBATES>
In-Reply-To: The message of 21 Feb 84  2257 PST from Dick Gabriel <RPG@SU-AI>

(PRINT TESTPATTERN) takes 11.472 seconds on ISI-Interlisp.
Looking over your chart comparing your benchmarks on lisp systems
the only one that looks odd is the one for traverse.  I did a
little test and used the fast versions of RPLACA, RPLACD, fetch
and replace.  This got our time down to 292.08 seconds.  Is this
OK?  Is there any thing else you want me to do?

/Ray

∂23-Feb-84  1144	RBATES@USC-ISIB 	Re: Varia    
Received: from USC-ISIB by SU-AI with TCP/SMTP; 23 Feb 84  11:43:48 PST
Date: 23 Feb 1984 11:42-PST
Sender: RBATES@USC-ISIB
Subject: Re: Varia    
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Message-ID: <[USC-ISIB]23-Feb-84 11:42:10.RBATES>
In-Reply-To: The message of 23 Feb 84  1028 PST from Dick Gabriel <RPG@SU-AI>

I didn't break down the times for Traverse into Initialize=30.656
and Traverse=261.424 for you.  Also I didn't run any test on
Frpoly for you.  Did you forget to give that file?

We are in the process of putting together an implementors guide
for ISI-Interlisp.  Most of it isn't written down yet.  You can
look at our 1982 or 84 Papers on ISI-Interlisp.  Will keep you
informed about any more implementation notes.  I can send you our
Users Guide if you send me your address.

/Ray

∂28-Feb-84  0953	RBATES@USC-ISIB 	Re: Varia    
Received: from USC-ISIB by SU-AI with TCP/SMTP; 28 Feb 84  09:53:45 PST
Date: 28 Feb 1984 09:50-PST
Sender: RBATES@USC-ISIB
Subject: Re: Varia    
From: Raymond Bates <RBATES at ISIB>
To: RPG@SU-AI
Cc: feber@USC-ISIB, mcgreal@ADA-VAX
Message-ID: <[USC-ISIB]28-Feb-84 09:50:20.RBATES>
In-Reply-To: The message of 23 Feb 84  1028 PST from Dick Gabriel <RPG@SU-AI>

P.S.

I took another look at your benchmarks and was able to improve their
times.  From a PC sampler program we have for ISI-Interlisp, one
can see BROWSE was spending most of its time in NTHCHAR.  I took
a look at NTHCHAR and by re-coding it so the normal case didn't
require so many calls to FIX'es, I got the time for BROWSE down to
124.304 (from 155.232).

DESTRUCTIVE turned up that RPTQ was faster than FRPTQ.  They
should have compiled to the same thing.  Also I finally added
FLENGTH as a hand coded function (it was on my list) and changed
RPLACA to FRPLACA and RPLACD to FRPLACD.  After all this I got
the time down of DESTRUCTIVE to 7.726 from (18.304).

All the changes to ISI-Interlisp will make it to the next
released system.  Don't worry I don't plan to look at your bench
marks any more.

/Ray

∂12-Mar-84  2039	KESSLER@UTAH-20.ARPA 	Re: Message? 
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 12 Mar 84  20:33:36 PST
Date: Mon 12 Mar 84 21:27:10-MST
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Re: Message? 
To: RPG@SU-AI.ARPA
In-Reply-To: Message from "Dick Gabriel <RPG@SU-AI.ARPA>" of Mon 12 Mar 84 19:38:00-MST

Here are the results of our timing tests for the dec-20.  A couple still didn't
work, but we will get those run and forward.  As soon as these work, we'll run
on the other machines.  Note, we couldn't tell if they are correct, since we
don't have the answers that they produce...  So if any numbers are way off,
let us know.  Also, this is pretty raw.  Would you like us to munge on it
to make it a little more readable???  Finally, we'll forward the code as soon
as all of the tests work.

Bob.
-------

			11-Mar-84  4:00:01

BATCON Version	104(4133)			GLXLIB Version	1(527)

	    Job TIME Req #983 for KESSLER in Stream 0

	OUTPUT:	 Nolog				TIME-LIMIT: 1:00:00
	UNIQUE:	 Yes				BATCH-LOG:  Supersede
	RESTART: No				ASSISTANCE: Yes
						SEQUENCE:   1416

	Input from => SS:<PSL.NEW>TIME.CTL.6
	Output to  => SS:<PSL.NEW>TIME.LOG



 4:00:01 MONTR	 Univ of Utah Computer Science KL-20, TOPS-20AN Monitor 5.3(5703)
 4:00:01 MONTR	@SET TIME-LIMIT 3600
 4:00:01 MONTR	@LOGIN KESSLER SMALL
 4:00:04 MONTR	 Job 29 on TTY213 11-Mar-84 04:00:04
 4:00:04 MONTR	 Previous login at 10-Mar-84 18:50:20
 4:00:08 MONTR	 End of BATCH.CMD.4
 4:00:14 MONTR	@
 4:00:14 MONTR	[SS Mounted]
 4:00:14 MONTR	
 4:00:14 MONTR	[CONNECTED TO SS:<PSL.NEW>]
 4:00:14 MONTR	@systat
 4:00:15 MONTR	 Sun 11-Mar-84 04:00:15  Up 6 days 10:58:43
 4:00:15 MONTR	 3+5 Jobs   Load av (class Symb)   1.06   0.23   0.08
 4:00:15 MONTR	
 4:00:15 MONTR	 No operator in attendance
 4:00:15 MONTR	
 4:00:15 MONTR	 Job  Line Program  User              Foreign host
 4:00:15 MONTR	  16   142  Exec    RAMAZANKHANI
 4:00:15 MONTR	  29*  213  Systat  KESSLER
 4:00:15 MONTR	  37   145  Exec    KROHNFELDT
 4:00:15 MONTR	
 4:00:15 MONTR	   1   206  Dnload  OPERATOR
 4:00:15 MONTR	   2   207  Batcon  OPERATOR
 4:00:15 MONTR	   3   210  Netsrv  OPERATOR
 4:00:16 MONTR	   4   211  Unxftp  OPERATOR
 4:00:16 MONTR	   5   212  Mmailr  OPERATOR
 4:00:16 MONTR	@@cd pnew:
 4:00:16 MONTR	@@psl:psl
 4:00:16 MONTR	[Keeping psl]
 4:00:20 USER	Utah PSL (3.2), 6-Mar-84 
 4:00:20 USER	1 lisp> *(off usermode)
 4:00:20 USER	NIL
 4:00:20 USER	2 lisp> *(dskin "time-boyer.sl")
 4:00:22 USER	NIL
 4:00:22 USER	NIL
 4:00:22 USER	NIL
 4:00:22 USER	NIL
 4:00:27 USER	*** (ADD-LEMMA): base 1331215, length 39 words
 4:00:27 USER	ADD-LEMMA
 4:00:28 USER	*** (ADD-LEMMA-LST): base 1331272, length 14 words
 4:00:28 USER	ADD-LEMMA-LST
 4:00:28 USER	*** (APPLY-SUBST): base 1331326, length 23 words
 4:00:28 USER	APPLY-SUBST
 4:00:28 USER	*** (APPLY-SUBST-LST): base 1331355, length 20 words
 4:00:28 USER	APPLY-SUBST-LST
 4:00:29 USER	*** (FALSEP): base 1331404, length 16 words
 4:00:29 USER	FALSEP
 4:00:29 USER	*** (ONE-WAY-UNIFY): base 1331442, length 4 words
 4:00:29 USER	ONE-WAY-UNIFY
 4:00:29 USER	*** (ONE-WAY-UNIFY1): base 1331453, length 38 words
 4:00:29 USER	ONE-WAY-UNIFY1
 4:00:30 USER	*** (ONE-WAY-UNIFY1-LST): base 1331525, length 21 words
 4:00:30 USER	ONE-WAY-UNIFY1-LST
 4:00:30 USER	*** (REWRITE): base 1331566, length 24 words
 4:00:30 USER	REWRITE
 4:00:30 USER	*** (REWRITE-ARGS): base 1331616, length 18 words
 4:00:30 USER	REWRITE-ARGS
 4:00:31 USER	*** (REWRITE-WITH-LEMMAS): base 1331640, length 27 words
 4:00:31 USER	REWRITE-WITH-LEMMAS
 4:01:35 USER	*** (SETUP): base 1332203, length 2346 words
 4:01:35 USER	SETUP
 4:01:36 USER	*** (TAUTOLOGYP): base 1336666, length 79 words
 4:01:36 USER	TAUTOLOGYP
 4:01:36 USER	*** (TAUTP): base 1337010, length 5 words
 4:01:36 USER	TAUTP
 4:01:37 USER	*** (TEST): base 1337017, length 165 words
 4:01:37 USER	TEST
 4:01:37 USER	*** (TRANS-OF-IMPLIES): base 1337276, length 15 words
 4:01:37 USER	TRANS-OF-IMPLIES
 4:01:37 USER	*** (TRANS-OF-IMPLIES1): base 1337315, length 25 words
 4:01:37 USER	TRANS-OF-IMPLIES1
 4:01:38 USER	*** (TRUEP): base 1337346, length 16 words
 4:01:38 USER	TRUEP
 4:01:38 USER	*** (TIMIT): base 1337371, length 181 words
 4:01:38 USER	TIMIT
 4:01:39 USER	T
 4:01:39 USER	Boyer Test
 4:01:39 USER	"Boyer Test"
 4:01:39 USER	
 4:01:39 USER	Timing performed on DEC-20
 4:01:39 USER	11-Mar-84 04:01:38 .
 4:01:39 USER	*** Garbage collection starting
 4:01:40 USER	*** GC 3: time 980 ms, 110836 recovered, 253028 free
 4:01:48 USER	*** Garbage collection starting
 4:01:54 USER	*** GC 4: time 5022 ms, 179291 recovered, 179291 free
 4:01:59 USER	*** Garbage collection starting
 4:02:07 USER	*** GC 5: time 6900 ms, 140840 recovered, 140841 free
 4:02:08 USER	
 4:02:08 USER	Cpu (- GC) Time = 11.74 secs
 4:02:08 USER	Elapsed Time = 27.0 secs
 4:02:08 USER	Wholine Time = 0.0
 4:02:08 USER	GC Time = 11.922 secs
 4:02:08 USER	Load Average Before  = 1.0
 4:02:08 USER	Load Average After   = 1.1
 4:02:08 USER	Average Load Average = 1.05
 4:02:08 USER	NIL
 4:02:08 USER	NIL
 4:02:08 USER	3 lisp> *(quit)
 4:02:08 MONTR	@@reset psl
 4:02:08 MONTR	@@systat
 4:02:09 MONTR	 Sun 11-Mar-84 04:02:08  Up 6 days 11:00:37
 4:02:09 MONTR	 3+5 Jobs   Load av (class Symb)   1.60   0.80   0.32
 4:02:09 MONTR	
 4:02:09 MONTR	 No operator in attendance
 4:02:09 MONTR	
 4:02:09 MONTR	 Job  Line Program  User              Foreign host
 4:02:09 MONTR	  16   142  Exec    RAMAZANKHANI
 4:02:09 MONTR	  29*  213  Systat  KESSLER
 4:02:09 MONTR	  37   145  Exec    KROHNFELDT
 4:02:09 MONTR	
 4:02:09 MONTR	   1   206  Dnload  OPERATOR
 4:02:09 MONTR	   2   207  Batcon  OPERATOR
 4:02:09 MONTR	   3   210  Netsrv  OPERATOR
 4:02:10 MONTR	   4   211  Unxftp  OPERATOR
 4:02:10 MONTR	   5   212  Mmailr  OPERATOR
 4:02:10 MONTR	@@psl:psl
 4:02:10 MONTR	[Keeping psl]
 4:02:14 USER	Utah PSL (3.2), 6-Mar-84 
 4:02:14 USER	1 lisp> *(off usermode)
 4:02:14 USER	NIL
 4:02:14 USER	2 lisp> *(dskin "time-browse.sl")
 4:02:17 USER	NIL
 4:02:17 USER	NIL
 4:02:17 USER	NIL
 4:02:17 USER	NIL
 4:02:17 USER	NIL
 4:02:17 USER	NIL
 4:02:25 USER	*** (INIT): base 1331207, length 102 words
 4:02:25 USER	INIT
 4:02:25 USER	*** (MOD): base 1331357, length 13 words
 4:02:25 USER	MOD
 4:02:25 USER	NIL
 4:02:25 USER	21
 4:02:25 USER	*** (SEED): base 1331403, length 4 words
 4:02:25 USER	SEED
 4:02:25 USER	*** (RANDOM): base 1331412, length 7 words
 4:02:25 USER	RANDOM
 4:02:26 USER	*** (RANDOMIZE): base 1331424, length 51 words
 4:02:26 USER	RANDOMIZE
 4:02:26 USER	*** (CHAR1): base 1331512, length 8 words
 4:02:26 USER	CHAR1
 4:02:29 USER	*** (MATCH): base 1331536, length 167 words
 4:02:29 USER	MATCH
 4:02:30 USER	*** (BROWSE): base 1332022, length 144 words
 4:02:30 USER	BROWSE
 4:02:30 USER	*** (INVESTIGATE): base 1332247, length 40 words
 4:02:30 USER	INVESTIGATE
 4:02:30 USER	NIL
 4:02:31 USER	*** (TIMIT): base 1332322, length 178 words
 4:02:31 USER	TIMIT
 4:02:31 USER	Browse Test.
 4:02:31 USER	"Browse Test."
 4:02:31 USER	
 4:02:31 USER	Timing performed on DEC-20
 4:02:31 USER	11-Mar-84 04:02:31 .
 4:02:31 USER	*** Garbage collection starting
 4:02:33 USER	*** GC 3: time 940 ms, 71848 recovered, 253262 free
 4:02:41 USER	*** Garbage collection starting
 4:02:42 USER	*** GC 4: time 1586 ms, 244803 recovered, 244803 free
 4:02:49 USER	*** Garbage collection starting
 4:02:51 USER	*** GC 5: time 1782 ms, 244852 recovered, 244853 free
 4:02:57 USER	*** Garbage collection starting
 4:02:59 USER	*** GC 6: time 1734 ms, 244894 recovered, 244895 free
 4:03:07 USER	
 4:03:07 USER	Cpu (- GC) Time = 24.126 secs
 4:03:07 USER	Elapsed Time = 34.0 secs
 4:03:07 USER	Wholine Time = 0.0
 4:03:07 USER	GC Time = 5.102 secs
 4:03:07 USER	Load Average Before  = 1.2
 4:03:07 USER	Load Average After   = 1.2
 4:03:07 USER	Average Load Average = 1.2
 4:03:07 USER	NIL
 4:03:08 USER	NIL
 4:03:08 USER	3 lisp> *(quit)
 4:03:08 MONTR	@@reset psl
 4:03:08 MONTR	@@systat
 4:03:08 MONTR	 Sun 11-Mar-84 04:03:08  Up 6 days 11:01:36
 4:03:08 MONTR	 3+5 Jobs   Load av (class Symb)   1.34   0.87   0.37
 4:03:08 MONTR	
 4:03:09 MONTR	 No operator in attendance
 4:03:09 MONTR	
 4:03:09 MONTR	 Job  Line Program  User              Foreign host
 4:03:09 MONTR	  16   142  Exec    RAMAZANKHANI
 4:03:09 MONTR	  29*  213  Systat  KESSLER
 4:03:09 MONTR	  37   145  Exec    KROHNFELDT
 4:03:09 MONTR	
 4:03:09 MONTR	   1   206  Dnload  OPERATOR
 4:03:09 MONTR	   2   207  Batcon  OPERATOR
 4:03:09 MONTR	   3   210  Netsrv  OPERATOR
 4:03:10 MONTR	   4   211  Unxftp  OPERATOR
 4:03:10 MONTR	   5   212  Mmailr  OPERATOR
 4:03:10 MONTR	@@psl:psl
 4:03:10 MONTR	[Keeping psl]
 4:03:14 USER	Utah PSL (3.2), 6-Mar-84 
 4:03:14 USER	1 lisp> *(off usermode)
 4:03:14 USER	NIL
 4:03:14 USER	2 lisp> *(dskin "time-dderiv.sl")
 4:03:16 USER	NIL
 4:03:16 USER	NIL
 4:03:16 USER	NIL
 4:03:16 USER	NIL
 4:03:16 USER	NIL
 4:03:17 USER	NIL
 4:03:22 USER	*** (DER1): base 1331202, length 9 words
 4:03:22 USER	DER1
 4:03:22 USER	*** (PLUS-DERIV): base 1331217, length 7 words
 4:03:22 USER	PLUS-DERIV
 4:03:22 USER	#<Code 1 331220>
 4:03:22 USER	*** (DIFFERENCE-DERIV): base 1331233, length 7 words
 4:03:22 USER	DIFFERENCE-DERIV
 4:03:22 USER	#<Code 1 331234>
 4:03:22 USER	*** (TIMES-DERIV): base 1331246, length 19 words
 4:03:22 USER	TIMES-DERIV
 4:03:22 USER	#<Code 1 331247>
 4:03:23 USER	*** (QUOTIENT-DERIV): base 1331275, length 36 words
 4:03:23 USER	QUOTIENT-DERIV
 4:03:23 USER	#<Code 1 331276>
 4:03:23 USER	*** (DERIV): base 1331341, length 29 words
 4:03:23 USER	DERIV
 4:03:23 USER	*** (RUNIT): base 1331404, length 50 words
 4:03:23 USER	RUNIT
 4:03:24 USER	NIL
 4:03:24 USER	NIL
 4:03:24 USER	*** (TIMIT): base 1331471, length 181 words
 4:03:24 USER	TIMIT
 4:03:24 USER	DDeriv Test, also same as FDDeriv.
 4:03:24 USER	"DDeriv Test, also same as FDDeriv."
 4:03:24 USER	
 4:03:24 USER	Timing performed on DEC-20
 4:03:24 USER	11-Mar-84 04:03:25 .
 4:03:24 USER	*** Garbage collection starting
 4:03:26 USER	*** GC 3: time 865 ms, 28957 recovered, 253464 free
 4:03:29 USER	*** Garbage collection starting
 4:03:31 USER	*** GC 4: time 1302 ms, 253383 recovered, 253383 free
 4:03:34 USER	*** Garbage collection starting
 4:03:36 USER	*** GC 5: time 1404 ms, 253448 recovered, 253449 free
 4:03:36 USER	
 4:03:36 USER	Cpu (- GC) Time = 6.04 secs
 4:03:36 USER	Elapsed Time = 10.0 secs
 4:03:36 USER	Wholine Time = 0.0
 4:03:36 USER	GC Time = 2.706 secs
 4:03:36 USER	Load Average Before  = 1.2
 4:03:36 USER	Load Average After   = 1.2
 4:03:36 USER	Average Load Average = 1.2
 4:03:36 USER	NIL
 4:03:36 USER	NIL
 4:03:36 USER	3 lisp> *(quit)
 4:03:36 MONTR	@@reset psl
 4:03:36 MONTR	@@systat
 4:03:36 MONTR	 Sun 11-Mar-84 04:03:36  Up 6 days 11:02:04
 4:03:36 MONTR	 3+5 Jobs   Load av (class Symb)   1.27   0.89   0.40
 4:03:36 MONTR	
 4:03:36 MONTR	 No operator in attendance
 4:03:37 MONTR	
 4:03:37 MONTR	 Job  Line Program  User              Foreign host
 4:03:37 MONTR	  16   142  Exec    RAMAZANKHANI
 4:03:37 MONTR	  29*  213  Systat  KESSLER
 4:03:37 MONTR	  37   145  Exec    KROHNFELDT
 4:03:37 MONTR	
 4:03:37 MONTR	   1   206  Dnload  OPERATOR
 4:03:37 MONTR	   2   207  Batcon  OPERATOR
 4:03:37 MONTR	   3   210  Netsrv  OPERATOR
 4:03:37 MONTR	   4   211  Unxftp  OPERATOR
 4:03:37 MONTR	   5   212  Mmailr  OPERATOR
 4:03:37 MONTR	@@psl:psl
 4:03:37 MONTR	[Keeping psl]
 4:03:42 USER	Utah PSL (3.2), 6-Mar-84 
 4:03:42 USER	1 lisp> *(off usermode)
 4:03:42 USER	NIL
 4:03:42 USER	2 lisp> *(dskin "time-deriv.sl")
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:45 USER	NIL
 4:03:50 USER	*** (DER1): base 1332305, length 9 words
 4:03:50 USER	DER1
 4:03:51 USER	*** (DERIV): base 1332316, length 93 words
 4:03:51 USER	DERIV
 4:03:51 USER	*** (RUNIT): base 1332461, length 50 words
 4:03:51 USER	RUNIT
 4:03:51 USER	NIL
 4:03:51 USER	NIL
 4:03:52 USER	*** (TIMIT): base 1332546, length 181 words
 4:03:52 USER	TIMIT
 4:03:52 USER	Deriv Test.
 4:03:52 USER	"Deriv Test."
 4:03:52 USER	
 4:03:52 USER	Timing performed on DEC-20
 4:03:52 USER	11-Mar-84 04:03:52 .
 4:03:52 USER	*** Garbage collection starting
 4:03:53 USER	*** GC 3: time 867 ms, 28276 recovered, 253474 free
 4:03:56 USER	*** Garbage collection starting
 4:03:58 USER	*** GC 4: time 1021 ms, 253403 recovered, 253403 free
 4:04:00 USER	
 4:04:00 USER	Cpu (- GC) Time = 5.384 secs
 4:04:00 USER	Elapsed Time = 7.0 secs
 4:04:00 USER	Wholine Time = 0.0
 4:04:00 USER	GC Time = 1.021 secs
 4:04:00 USER	Load Average Before  = 1.1
 4:04:00 USER	Load Average After   = 1.1
 4:04:00 USER	Average Load Average = 1.1
 4:04:00 USER	NIL
 4:04:01 USER	NIL
 4:04:01 USER	3 lisp> *(quit)
 4:04:01 MONTR	@@reset psl
 4:04:01 MONTR	@@systat
 4:04:01 MONTR	 Sun 11-Mar-84 04:04:00  Up 6 days 11:02:28
 4:04:01 MONTR	 3+5 Jobs   Load av (class Symb)   1.21   0.91   0.41
 4:04:01 MONTR	
 4:04:01 MONTR	 No operator in attendance
 4:04:01 MONTR	
 4:04:01 MONTR	 Job  Line Program  User              Foreign host
 4:04:01 MONTR	  16   142  Exec    RAMAZANKHANI
 4:04:01 MONTR	  29*  213  Systat  KESSLER
 4:04:01 MONTR	  37   145  Exec    KROHNFELDT
 4:04:01 MONTR	
 4:04:01 MONTR	   1   206  Dnload  OPERATOR
 4:04:01 MONTR	   2   207  Batcon  OPERATOR
 4:04:01 MONTR	   3   210  Netsrv  OPERATOR
 4:04:01 MONTR	   4   211  Unxftp  OPERATOR
 4:04:01 MONTR	   5   212  Mmailr  OPERATOR
 4:04:01 MONTR	@@psl:psl
 4:04:01 MONTR	[Keeping psl]
 4:04:05 USER	Utah PSL (3.2), 6-Mar-84 
 4:04:05 USER	1 lisp> *(off usermode)
 4:04:05 USER	NIL
 4:04:05 USER	2 lisp> *(dskin "time-destru.sl")
 4:04:07 USER	NIL
 4:04:07 USER	NIL
 4:04:07 USER	NIL
 4:04:07 USER	NIL
 4:04:07 USER	NIL
 4:04:07 USER	NIL
 4:04:14 USER	*** (DESTRUCTIVE): base 1331201, length 144 words
 4:04:15 USER	DESTRUCTIVE
 4:04:15 USER	NIL
 4:04:15 USER	NIL
 4:04:15 USER	*** (TIMIT): base 1331424, length 183 words
 4:04:15 USER	TIMIT
 4:04:16 USER	Destru Test
 4:04:16 USER	"Destru Test"
 4:04:16 USER	
 4:04:16 USER	Timing performed on DEC-20
 4:04:16 USER	11-Mar-84 04:04:15 .
 4:04:16 USER	*** Garbage collection starting
 4:04:17 USER	*** GC 3: time 864 ms, 35990 recovered, 253305 free
 4:04:17 USER	
 4:04:17 USER	Cpu (- GC) Time = .238 secs
 4:04:17 USER	Elapsed Time = 0.0 secs
 4:04:17 USER	Wholine Time = 0.0
 4:04:17 USER	GC Time = 0.0 secs
 4:04:17 USER	Load Average Before  = 1.1
 4:04:18 USER	Load Average After   = 1.1
 4:04:18 USER	Average Load Average = 1.1
 4:04:18 USER	NIL
 4:04:18 USER	NIL
 4:04:18 USER	3 lisp> *(quit)
 4:04:19 MONTR	@@reset psl
 4:04:19 MONTR	@@systat
 4:04:19 MONTR	 Sun 11-Mar-84 04:04:18  Up 6 days 11:02:47
 4:04:19 MONTR	 3+5 Jobs   Load av (class Symb)   1.17   0.92   0.43
 4:04:19 MONTR	
 4:04:19 MONTR	 No operator in attendance
 4:04:19 MONTR	
 4:04:19 MONTR	 Job  Line Program  User              Foreign host
 4:04:19 MONTR	  16   142  Exec    RAMAZANKHANI
 4:04:19 MONTR	  29*  213  Systat  KESSLER
 4:04:19 MONTR	  37   145  Exec    KROHNFELDT
 4:04:19 MONTR	
 4:04:19 MONTR	   1   206  Dnload  OPERATOR
 4:04:19 MONTR	   2   207  Batcon  OPERATOR
 4:04:19 MONTR	   3   210  Netsrv  OPERATOR
 4:04:19 MONTR	   4   211  Unxftp  OPERATOR
 4:04:19 MONTR	   5   212  Mmailr  OPERATOR
 4:04:19 MONTR	@@psl:psl
 4:04:19 MONTR	[Keeping psl]
 4:04:23 USER	Utah PSL (3.2), 6-Mar-84 
 4:04:23 USER	1 lisp> *(off usermode)
 4:04:24 USER	NIL
 4:04:24 USER	2 lisp> *(dskin "time-div.sl")
 4:04:26 USER	NIL
 4:04:26 USER	NIL
 4:04:26 USER	NIL
 4:04:26 USER	NIL
 4:04:26 USER	NIL
 4:04:26 USER	NIL
 4:04:31 USER	*** (CREATE-N): base 1331200, length 17 words
 4:04:31 USER	CREATE-N
 4:04:31 USER	*** (DIV2): base 1331223, length 21 words
 4:04:32 USER	DIV2
 4:04:32 USER	*** (DV2): base 1331252, length 15 words
 4:04:32 USER	DV2
 4:04:32 USER	*** (TEST1): base 1331274, length 21 words
 4:04:32 USER	TEST1
 4:04:32 USER	*** (TEST2): base 1331324, length 21 words
 4:04:32 USER	TEST2
 4:04:32 USER	NIL
 4:04:32 USER	(NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:33 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:34 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL 
 4:04:34 USER	NIL NIL NIL NIL NIL NIL NIL NIL NIL NIL)
 4:04:34 USER	NIL
 4:04:34 USER	NIL
 4:04:35 USER	*** (TIMIT1): base 1331354, length 182 words
 4:04:35 USER	TIMIT1
 4:04:35 USER	*** (TIMIT2): base 1331645, length 182 words
 4:04:35 USER	TIMIT2
 4:04:36 USER	*** String continued over end-of-line
 4:04:36 USER	*** String continued over end-of-line
 4:04:36 USER	*** String continued over end-of-line
 4:04:36 USER	*** String continued over end-of-line
 4:04:36 USER	***** ***** Error in token scanner: EOF encountered inside a string
 4:04:36 USER	NIL
 4:04:36 USER	3 lisp> *(quit)
 4:04:36 MONTR	@@reset psl
 4:04:36 MONTR	@@systat
 4:04:36 MONTR	 Sun 11-Mar-84 04:04:36  Up 6 days 11:03:04
 4:04:36 MONTR	 3+5 Jobs   Load av (class Symb)   1.16   0.93   0.44
 4:04:36 MONTR	
 4:04:36 MONTR	 No operator in attendance
 4:04:36 MONTR	
 4:04:36 MONTR	 Job  Line Program  User              Foreign host
 4:04:36 MONTR	  16   142  Exec    RAMAZANKHANI
 4:04:36 MONTR	  29*  213  Systat  KESSLER
 4:04:36 MONTR	  37   145  Exec    KROHNFELDT
 4:04:36 MONTR	
 4:04:36 MONTR	   1   206  Dnload  OPERATOR
 4:04:36 MONTR	   2   207  Batcon  OPERATOR
 4:04:37 MONTR	   3   210  Netsrv  OPERATOR
 4:04:37 MONTR	   4   211  Unxftp  OPERATOR
 4:04:37 MONTR	   5   212  Mmailr  OPERATOR
 4:04:37 MONTR	@@psl:psl
 4:04:37 MONTR	[Keeping psl]
 4:04:40 USER	Utah PSL (3.2), 6-Mar-84 
 4:04:40 USER	1 lisp> *(off usermode)
 4:04:40 USER	NIL
 4:04:40 USER	2 lisp> *(dskin "time-fft.sl")
 4:04:43 USER	NIL
 4:04:43 USER	NIL
 4:04:43 USER	NIL
 4:04:43 USER	NIL
 4:04:43 USER	NIL
 4:04:43 USER	NIL
 4:04:48 USER	*** (**): base 1335746, length 18 words
 4:04:48 USER	**
 4:04:48 USER	NIL
 4:04:52 USER	*** (FFT): base 1336034, length 273 words
 4:04:52 USER	FFT
 4:04:53 USER	NIL
 4:04:54 USER	*** (TIMIT): base 1336467, length 196 words
 4:04:54 USER	TIMIT
 4:04:54 USER	FFT Test
 4:04:54 USER	"FFT Test"
 4:04:54 USER	
 4:04:54 USER	Timing performed on DEC-20
 4:04:54 USER	11-Mar-84 04:04:54 .
 4:04:54 USER	*** Garbage collection starting
 4:04:55 USER	*** GC 3: time 1006 ms, 58750 recovered, 251120 free
 4:05:01 USER	*** Garbage collection starting
 4:05:03 USER	*** GC 4: time 1789 ms, 244956 recovered, 244958 free
 4:05:09 USER	*** Garbage collection starting
 4:05:11 USER	*** GC 5: time 1582 ms, 244950 recovered, 244952 free
 4:05:17 USER	*** Garbage collection starting
 4:05:18 USER	*** GC 6: time 1433 ms, 244950 recovered, 244952 free
 4:05:24 USER	*** Garbage collection starting
 4:05:26 USER	*** GC 7: time 1656 ms, 244950 recovered, 244952 free
 4:05:31 USER	*** Garbage collection starting
 4:05:33 USER	*** GC 8: time 1708 ms, 244956 recovered, 244958 free
 4:05:38 USER	*** Garbage collection starting
 4:05:40 USER	*** GC 9: time 1326 ms, 244950 recovered, 244952 free
 4:05:45 USER	*** Garbage collection starting
 4:05:48 USER	*** GC 10: time 1576 ms, 244956 recovered, 244958 free
 4:05:48 USER	
 4:05:48 USER	Cpu (- GC) Time = 35.517 secs
 4:05:48 USER	Elapsed Time = 53.0 secs
 4:05:48 USER	Wholine Time = 0.0
 4:05:48 USER	GC Time = 11.07 secs
 4:05:48 USER	Load Average Before  = 1.1
 4:05:48 USER	Load Average After   = 1.1
 4:05:48 USER	Average Load Average = 1.1
 4:05:48 USER	NIL
 4:05:48 USER	NIL
 4:05:48 USER	3 lisp> *(quit)
 4:05:49 MONTR	@@reset psl
 4:05:49 MONTR	@@systat
 4:05:49 MONTR	 Sun 11-Mar-84 04:05:49  Up 6 days 11:04:18
 4:05:50 MONTR	 3+5 Jobs   Load av (class Symb)   1.09   0.96   0.49
 4:05:50 MONTR	
 4:05:50 MONTR	 No operator in attendance
 4:05:50 MONTR	
 4:05:50 MONTR	 Job  Line Program  User              Foreign host
 4:05:50 MONTR	  16   142  Exec    RAMAZANKHANI
 4:05:50 MONTR	  29*  213  Systat  KESSLER
 4:05:50 MONTR	  37   145  Exec    KROHNFELDT
 4:05:50 MONTR	
 4:05:50 MONTR	   1   206  Dnload  OPERATOR
 4:05:50 MONTR	   2   207  Batcon  OPERATOR
 4:05:50 MONTR	   3   210  Netsrv  OPERATOR
 4:05:50 MONTR	   4   211  Unxftp  OPERATOR
 4:05:50 MONTR	   5   212  Mmailr  OPERATOR
 4:05:50 MONTR	@@psl:psl
 4:05:51 MONTR	[Keeping psl]
 4:05:55 USER	Utah PSL (3.2), 6-Mar-84 
 4:05:55 USER	1 lisp> *(off usermode)
 4:05:55 USER	NIL
 4:05:55 USER	2 lisp> *(dskin "time-fprint.sl")
 4:05:57 USER	NIL
 4:05:57 USER	NIL
 4:05:57 USER	NIL
 4:05:57 USER	NIL
 4:05:57 USER	NIL
 4:05:57 USER	NIL
 4:06:03 USER	*** (INIT): base 1331205, length 25 words
 4:06:03 USER	INIT
 4:06:03 USER	*** (INIT1): base 1331236, length 41 words
 4:06:03 USER	INIT1
 4:06:03 USER	NIL
 4:06:03 USER	(ABCDEF12 CDEFGH23 EFGHIJ34 GHIJKL45 IJKLMN56 KLMNOP67 MNOPQR78 OPQRST89 
 4:06:03 USER	QRSTUV90 STUVWX01 UVWXYZ12 WXYZAB23 XYZABC34 !123456AB !234567BC !345678CD 
 4:06:03 USER	!456789DE !567890EF !678901FG !789012GH !890123HI)
 4:06:03 USER	NIL
 4:06:04 USER	((((((!678901FG !567890EF !567890EF !456789DE !456789DE !345678CD) !234567BC (
 4:06:04 USER	!567890EF !456789DE !456789DE !345678CD !345678CD !234567BC) !123456AB (
 4:06:04 USER	!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) XYZABC34) 
 4:06:04 USER	WXYZAB23 ((!567890EF !456789DE !456789DE !345678CD !345678CD !234567BC) 
 4:06:04 USER	!123456AB (!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) 
 4:06:04 USER	XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:04 USER	WXYZAB23) UVWXYZ12 ((!456789DE !345678CD !345678CD !234567BC !234567BC 
 4:06:04 USER	!123456AB) XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB 
 4:06:04 USER	XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:04 USER	UVWXYZ12) STUVWX01) QRSTUV90 (((!567890EF !456789DE !456789DE !345678CD 
 4:06:04 USER	!345678CD !234567BC) !123456AB (!456789DE !345678CD !345678CD !234567BC 
 4:06:04 USER	!234567BC !123456AB) XYZABC34 (!345678CD !234567BC !234567BC !123456AB 
 4:06:05 USER	!123456AB XYZABC34) WXYZAB23) UVWXYZ12 ((!456789DE !345678CD !345678CD 
 4:06:05 USER	!234567BC !234567BC !123456AB) XYZABC34 (!345678CD !234567BC !234567BC 
 4:06:05 USER	!123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB 
 4:06:05 USER	XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) STUVWX01 ((!345678CD !234567BC 
 4:06:05 USER	!234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB 
 4:06:05 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:05 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90) OPQRST89 (((!456789DE 
 4:06:05 USER	!345678CD !345678CD !234567BC !234567BC !123456AB) XYZABC34 (!345678CD 
 4:06:05 USER	!234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC 
 4:06:05 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) STUVWX01 ((
 4:06:05 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (
 4:06:05 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:05 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC 
 4:06:06 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:06 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:06 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) MNOPQR78) KLMNOP67 ((((
 4:06:06 USER	!567890EF !456789DE !456789DE !345678CD !345678CD !234567BC) !123456AB (
 4:06:06 USER	!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) XYZABC34 (
 4:06:06 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23) 
 4:06:06 USER	UVWXYZ12 ((!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) 
 4:06:06 USER	XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:06 USER	WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) 
 4:06:06 USER	STUVWX01 ((!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:07 USER	WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (
 4:06:07 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90) 
 4:06:07 USER	OPQRST89 (((!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) 
 4:06:07 USER	XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:07 USER	WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) 
 4:06:07 USER	STUVWX01 ((!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:07 USER	WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (
 4:06:07 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((
 4:06:07 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:07 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 
 4:06:07 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) MNOPQR78 (((
 4:06:08 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (
 4:06:08 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:08 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC 
 4:06:08 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:08 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:08 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 
 4:06:08 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:08 USER	UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 
 4:06:08 USER	QRSTUV90) OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56 ((((!456789DE !345678CD 
 4:06:08 USER	!345678CD !234567BC !234567BC !123456AB) XYZABC34 (!345678CD !234567BC 
 4:06:08 USER	!234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB 
 4:06:08 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) STUVWX01 ((!345678CD 
 4:06:08 USER	!234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC 
 4:06:09 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:09 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC !123456AB 
 4:06:09 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:09 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:09 USER	UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) MNOPQR78 (((!345678CD !234567BC 
 4:06:09 USER	!234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB 
 4:06:09 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:09 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC !123456AB 
 4:06:10 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:10 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:10 USER	UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:10 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:10 USER	STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) 
 4:06:10 USER	OPQRST89) MNOPQR78) KLMNOP67 (((!234567BC !123456AB !123456AB XYZABC34 
 4:06:10 USER	XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:10 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:10 USER	QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) 
 4:06:10 USER	STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:10 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((
 4:06:10 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 
 4:06:11 USER	UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 
 4:06:11 USER	STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56) GHIJKL45) 
 4:06:11 USER	EFGHIJ34 (((((!567890EF !456789DE !456789DE !345678CD !345678CD !234567BC) 
 4:06:11 USER	!123456AB (!456789DE !345678CD !345678CD !234567BC !234567BC !123456AB) 
 4:06:11 USER	XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:11 USER	WXYZAB23) UVWXYZ12 ((!456789DE !345678CD !345678CD !234567BC !234567BC 
 4:06:11 USER	!123456AB) XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB 
 4:06:11 USER	XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:11 USER	UVWXYZ12) STUVWX01 ((!345678CD !234567BC !234567BC !123456AB !123456AB 
 4:06:11 USER	XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:11 USER	UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) 
 4:06:11 USER	QRSTUV90) OPQRST89 (((!456789DE !345678CD !345678CD !234567BC !234567BC 
 4:06:12 USER	!123456AB) XYZABC34 (!345678CD !234567BC !234567BC !123456AB !123456AB 
 4:06:12 USER	XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:12 USER	UVWXYZ12) STUVWX01 ((!345678CD !234567BC !234567BC !123456AB !123456AB 
 4:06:12 USER	XYZABC34) WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:12 USER	UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) 
 4:06:12 USER	QRSTUV90 ((!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) 
 4:06:12 USER	UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (
 4:06:12 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) 
 4:06:12 USER	MNOPQR78 (((!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) 
 4:06:12 USER	WXYZAB23 (!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (
 4:06:12 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((
 4:06:12 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:13 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 
 4:06:13 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 
 4:06:13 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:13 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 
 4:06:13 USER	STUVWX01 QRSTUV90) OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56 ((((!456789DE 
 4:06:13 USER	!345678CD !345678CD !234567BC !234567BC !123456AB) XYZABC34 (!345678CD 
 4:06:13 USER	!234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC 
 4:06:13 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) STUVWX01 ((
 4:06:13 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (
 4:06:13 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:13 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC 
 4:06:13 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:14 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:14 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) MNOPQR78 (((!345678CD 
 4:06:14 USER	!234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC 
 4:06:14 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:14 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC !123456AB 
 4:06:14 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:14 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:14 USER	UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:14 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:14 USER	STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) 
 4:06:14 USER	OPQRST89) MNOPQR78) KLMNOP67 (((!234567BC !123456AB !123456AB XYZABC34 
 4:06:14 USER	XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:15 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:15 USER	QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) 
 4:06:15 USER	STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:15 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((
 4:06:15 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 
 4:06:15 USER	UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 
 4:06:15 USER	STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56) GHIJKL45 ((((
 4:06:15 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (
 4:06:15 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:15 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC 
 4:06:15 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:16 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:16 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 
 4:06:16 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:16 USER	UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 
 4:06:16 USER	QRSTUV90) OPQRST89) MNOPQR78) KLMNOP67 (((!234567BC !123456AB !123456AB 
 4:06:16 USER	XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:16 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:16 USER	STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:16 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:16 USER	QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) 
 4:06:16 USER	MNOPQR78 ((XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:16 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 
 4:06:17 USER	STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56 (((
 4:06:17 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 
 4:06:17 USER	WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 
 4:06:17 USER	UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((XYZABC34 WXYZAB23 
 4:06:17 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:17 USER	STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 
 4:06:17 USER	QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67 ((WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 
 4:06:17 USER	STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 
 4:06:17 USER	OPQRST89) MNOPQR78 (STUVWX01 QRSTUV90 QRSTUV90 OPQRST89 OPQRST89 MNOPQR78) 
 4:06:17 USER	KLMNOP67) IJKLMN56) GHIJKL45) EFGHIJ34) CDEFGH23 (((((!456789DE !345678CD 
 4:06:17 USER	!345678CD !234567BC !234567BC !123456AB) XYZABC34 (!345678CD !234567BC 
 4:06:17 USER	!234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB 
 4:06:17 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12) STUVWX01 ((!345678CD 
 4:06:17 USER	!234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC 
 4:06:17 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:17 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC !123456AB 
 4:06:18 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:18 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:18 USER	UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89) MNOPQR78 (((!345678CD !234567BC 
 4:06:18 USER	!234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (!234567BC !123456AB 
 4:06:18 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:18 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC !123456AB 
 4:06:18 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 
 4:06:18 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:18 USER	UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:18 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:18 USER	STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) 
 4:06:19 USER	OPQRST89) MNOPQR78) KLMNOP67 (((!234567BC !123456AB !123456AB XYZABC34 
 4:06:19 USER	XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:19 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:19 USER	QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) 
 4:06:19 USER	STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:19 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((
 4:06:19 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 
 4:06:19 USER	UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 
 4:06:19 USER	STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56) GHIJKL45 ((((
 4:06:19 USER	!345678CD !234567BC !234567BC !123456AB !123456AB XYZABC34) WXYZAB23 (
 4:06:19 USER	!234567BC !123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB 
 4:06:20 USER	XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01) QRSTUV90 ((!234567BC 
 4:06:20 USER	!123456AB !123456AB XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 
 4:06:20 USER	XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 
 4:06:20 USER	UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 
 4:06:20 USER	WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 
 4:06:20 USER	UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 
 4:06:20 USER	QRSTUV90) OPQRST89) MNOPQR78) KLMNOP67 (((!234567BC !123456AB !123456AB 
 4:06:20 USER	XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:20 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:20 USER	STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:20 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:20 USER	QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) 
 4:06:21 USER	MNOPQR78 ((XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:21 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 
 4:06:21 USER	STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56 (((
 4:06:21 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 
 4:06:21 USER	WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 
 4:06:21 USER	UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((XYZABC34 WXYZAB23 
 4:06:21 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:21 USER	STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 
 4:06:21 USER	QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67 ((WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 
 4:06:21 USER	STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 
 4:06:21 USER	OPQRST89) MNOPQR78 (STUVWX01 QRSTUV90 QRSTUV90 OPQRST89 OPQRST89 MNOPQR78) 
 4:06:22 USER	KLMNOP67) IJKLMN56) GHIJKL45) EFGHIJ34 ((((!234567BC !123456AB !123456AB 
 4:06:22 USER	XYZABC34 XYZABC34 WXYZAB23) UVWXYZ12 (!123456AB XYZABC34 XYZABC34 WXYZAB23 
 4:06:22 USER	WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:22 USER	STUVWX01) QRSTUV90) OPQRST89 ((!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 
 4:06:22 USER	UVWXYZ12) STUVWX01 (XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) 
 4:06:22 USER	QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) 
 4:06:22 USER	MNOPQR78 ((XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (
 4:06:22 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 
 4:06:22 USER	STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67) IJKLMN56 (((
 4:06:22 USER	!123456AB XYZABC34 XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12) STUVWX01 (XYZABC34 
 4:06:22 USER	WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 
 4:06:22 USER	UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89) MNOPQR78 ((XYZABC34 WXYZAB23 
 4:06:23 USER	WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:23 USER	STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 
 4:06:23 USER	QRSTUV90 OPQRST89) MNOPQR78) KLMNOP67 ((WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 
 4:06:23 USER	STUVWX01 QRSTUV90) OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 
 4:06:23 USER	OPQRST89) MNOPQR78 (STUVWX01 QRSTUV90 QRSTUV90 OPQRST89 OPQRST89 MNOPQR78) 
 4:06:23 USER	KLMNOP67) IJKLMN56) GHIJKL45 (((XYZABC34 WXYZAB23 WXYZAB23 UVWXYZ12 UVWXYZ12 
 4:06:23 USER	STUVWX01) QRSTUV90 (WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) 
 4:06:23 USER	OPQRST89 (UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78) 
 4:06:23 USER	KLMNOP67 ((WXYZAB23 UVWXYZ12 UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90) OPQRST89 (
 4:06:23 USER	UVWXYZ12 STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78 (STUVWX01 
 4:06:23 USER	QRSTUV90 QRSTUV90 OPQRST89 OPQRST89 MNOPQR78) KLMNOP67) IJKLMN56 ((UVWXYZ12 
 4:06:24 USER	STUVWX01 STUVWX01 QRSTUV90 QRSTUV90 OPQRST89) MNOPQR78 (STUVWX01 QRSTUV90 
 4:06:24 USER	QRSTUV90 OPQRST89 OPQRST89 MNOPQR78) KLMNOP67 (QRSTUV90 OPQRST89 OPQRST89 
 4:06:24 USER	MNOPQR78 MNOPQR78 KLMNOP67) IJKLMN56) GHIJKL45) EFGHIJ34) CDEFGH23) ABCDEF12)
 4:06:24 USER	*** (FPRINT): base 1331421, length 17 words
 4:06:24 USER	FPRINT
 4:06:24 USER	T
 4:06:24 USER	NIL
 4:06:24 USER	NIL
 4:06:25 USER	*** (TIMIT): base 1331445, length 181 words
 4:06:25 USER	TIMIT
 4:06:25 USER	Fprint Test
 4:06:25 USER	"Fprint Test"
 4:06:25 USER	
 4:06:25 USER	Timing performed on DEC-20
 4:06:25 USER	11-Mar-84 04:06:25 .
 4:06:25 USER	*** Garbage collection starting
 4:06:27 USER	*** GC 3: time 1050 ms, 25073 recovered, 249062 free
 4:06:32 USER	
 4:06:32 USER	Cpu (- GC) Time = 4.665 secs
 4:06:32 USER	Elapsed Time = 5.0 secs
 4:06:32 USER	Wholine Time = 0.0
 4:06:32 USER	GC Time = 0.0 secs
 4:06:32 USER	Load Average Before  = 1.3
 4:06:32 USER	Load Average After   = 1.3
 4:06:32 USER	Average Load Average = 1.3
 4:06:32 USER	NIL
 4:06:32 USER	NIL
 4:06:32 USER	3 lisp> *(quit)
 4:06:32 MONTR	@@reset psl
 4:06:32 MONTR	@@systat
 4:06:32 MONTR	 Sun 11-Mar-84 04:06:32  Up 6 days 11:05:00
 4:06:32 MONTR	 3+5 Jobs   Load av (class Symb)   1.14   0.99   0.52
 4:06:32 MONTR	
 4:06:32 MONTR	 No operator in attendance
 4:06:32 MONTR	
 4:06:32 MONTR	 Job  Line Program  User              Foreign host
 4:06:32 MONTR	  16   142  Exec    RAMAZANKHANI
 4:06:32 MONTR	  29*  213  Systat  KESSLER
 4:06:32 MONTR	  37   145  Exec    KROHNFELDT
 4:06:32 MONTR	
 4:06:32 MONTR	   1   206  Dnload  OPERATOR
 4:06:32 MONTR	   2   207  Batcon  OPERATOR
 4:06:33 MONTR	   3   210  Netsrv  OPERATOR
 4:06:33 MONTR	   4   211  Unxftp  OPERATOR
 4:06:33 MONTR	   5   212  Mmailr  OPERATOR
 4:06:33 MONTR	@@psl:psl
 4:06:33 MONTR	[Keeping psl]
 4:06:36 USER	Utah PSL (3.2), 6-Mar-84 
 4:06:36 USER	1 lisp> *(off usermode)
 4:06:36 USER	NIL
 4:06:36 USER	2 lisp> *(dskin "time-fread.sl")
 4:06:38 USER	NIL
 4:06:38 USER	NIL
 4:06:38 USER	NIL
 4:06:38 USER	NIL
 4:06:43 USER	*** (FREAD): base 1331200, length 16 words
 4:06:43 USER	FREAD
 4:06:43 USER	T
 4:06:43 USER	NIL
 4:06:44 USER	*** (TIMIT): base 1331227, length 181 words
 4:06:44 USER	TIMIT
 4:06:44 USER	timer test on fread
 4:06:44 USER	"timer test on fread"
 4:06:44 USER	
 4:06:44 USER	Timing performed on DEC-20
 4:06:44 USER	11-Mar-84 04:06:44 .
 4:06:44 USER	*** Garbage collection starting
 4:06:45 USER	*** GC 3: time 843 ms, 15707 recovered, 253480 free
 4:06:51 USER	
 4:06:51 USER	Cpu (- GC) Time = 5.725 secs
 4:06:51 USER	Elapsed Time = 6.0 secs
 4:06:51 USER	Wholine Time = 0.0
 4:06:52 USER	GC Time = 0.0 secs
 4:06:52 USER	Load Average Before  = 1.2
 4:06:52 USER	Load Average After   = 1.2
 4:06:52 USER	Average Load Average = 1.2
 4:06:52 USER	NIL
 4:06:52 USER	timit finished
 4:06:52 USER	"timit finished"
 4:06:52 USER	NIL
 4:06:52 USER	3 lisp> *(quit)
 4:06:52 MONTR	@@reset psl
 4:06:52 MONTR	@@systat
 4:06:52 MONTR	 Sun 11-Mar-84 04:06:52  Up 6 days 11:05:20
 4:06:52 MONTR	 3+5 Jobs   Load av (class Symb)   1.16   1.00   0.54
 4:06:52 MONTR	
 4:06:52 MONTR	 No operator in attendance
 4:06:52 MONTR	
 4:06:52 MONTR	 Job  Line Program  User              Foreign host
 4:06:52 MONTR	  16   142  Exec    RAMAZANKHANI
 4:06:52 MONTR	  29*  213  Systat  KESSLER
 4:06:52 MONTR	  37   145  Exec    KROHNFELDT
 4:06:52 MONTR	
 4:06:52 MONTR	   1   206  Dnload  OPERATOR
 4:06:52 MONTR	   2   207  Batcon  OPERATOR
 4:06:52 MONTR	   3   210  Netsrv  OPERATOR
 4:06:52 MONTR	   4   211  Unxftp  OPERATOR
 4:06:52 MONTR	   5   212  Mmailr  OPERATOR
 4:06:52 MONTR	@@psl:psl
 4:06:53 MONTR	[Keeping psl]
 4:06:56 USER	Utah PSL (3.2), 6-Mar-84 
 4:06:56 USER	1 lisp> *(off usermode)
 4:06:56 USER	NIL
 4:06:56 USER	2 lisp> *(dskin "time-puzzle.sl")
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	511
 4:06:58 USER	3
 4:06:58 USER	12
 4:06:58 USER	8
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	NIL
 4:06:58 USER	***** `IADD1' is an undefined function
 4:06:58 USER	NIL
 4:06:58 USER	3 lisp> *(quit)
 4:06:59 MONTR	@@reset psl
 4:06:59 MONTR	@@systat
 4:06:59 MONTR	 Sun 11-Mar-84 04:06:59  Up 6 days 11:05:27
 4:07:00 MONTR	 3+5 Jobs   Load av (class Symb)   1.16   1.01   0.54
 4:07:00 MONTR	
 4:07:00 MONTR	 No operator in attendance
 4:07:00 MONTR	
 4:07:00 MONTR	 Job  Line Program  User              Foreign host
 4:07:00 MONTR	  16   142  Exec    RAMAZANKHANI
 4:07:00 MONTR	  29*  213  Systat  KESSLER
 4:07:00 MONTR	  37   145  Exec    KROHNFELDT
 4:07:00 MONTR	
 4:07:00 MONTR	   1   206  Dnload  OPERATOR
 4:07:00 MONTR	   2   207  Batcon  OPERATOR
 4:07:00 MONTR	   3   210  Netsrv  OPERATOR
 4:07:00 MONTR	   4   211  Unxftp  OPERATOR
 4:07:00 MONTR	   5   212  Mmailr  OPERATOR
 4:07:00 MONTR	@@psl:psl
 4:07:00 MONTR	[Keeping psl]
 4:07:04 USER	Utah PSL (3.2), 6-Mar-84 
 4:07:04 USER	1 lisp> *(off usermode)
 4:07:04 USER	NIL
 4:07:04 USER	2 lisp> *(dskin "time-tprint.sl")
 4:07:06 USER	NIL
 4:07:06 USER	NIL
 4:07:06 USER	NIL
 4:07:06 USER	NIL
 4:07:12 USER	*** in LOAD1: Illegal to bind global *TRACEALL but binding anyway
 4:07:13 USER	*** in LOAD1: Illegal to bind global *BREAKALL but binding anyway
 4:07:13 USER	*** in LOAD1: Illegal to bind global *INSTALL but binding anyway
 4:07:13 USER	*** (LOAD1): base 1350165, length 10 words
 4:07:13 USER	NIL
 4:07:13 USER	NIL
 4:07:13 USER	NIL
 4:07:13 USER	*** (INIT): base 1350212, length 25 words
 4:07:13 USER	INIT
 4:07:14 USER	*** (INIT1): base 1350243, length 41 words
 4:07:14 USER	INIT1
 4:07:14 USER	NIL
 4:07:14 USER	(ABC1 CDE2 EFG3 GHI4 IJK5 KLM6 MNO7 OPQ8 QRS9 STU0 UVW1 WXY2 XYZ3 !123A !234B 
 4:07:14 USER	!345C !456D !567D !678E !789F !890G)
 4:07:14 USER	NIL
 4:07:14 USER	((((((!678E !567D !567D !456D !456D !345C) !234B (!567D !456D !456D !345C 
 4:07:14 USER	!345C !234B) !123A (!456D !345C !345C !234B !234B !123A) XYZ3) WXY2 ((!567D 
 4:07:14 USER	!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
 4:07:14 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C 
 4:07:14 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:14 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0) QRS9 (((!567D !456D !456D !345C !345C 
 4:07:15 USER	!234B) !123A (!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B 
 4:07:15 USER	!123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C !234B !234B !123A) XYZ3 (
 4:07:15 USER	!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) 
 4:07:15 USER	UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A 
 4:07:15 USER	XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9) OPQ8 (((
 4:07:15 USER	!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A 
 4:07:15 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B 
 4:07:15 USER	!123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:15 USER	XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:15 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) 
 4:07:15 USER	OPQ8) MNO7) KLM6 ((((!567D !456D !456D !345C !345C !234B) !123A (!456D !345C 
 4:07:15 USER	!345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) 
 4:07:15 USER	UVW1 ((!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A 
 4:07:16 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B 
 4:07:16 USER	!234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:16 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9) OPQ8 (((!456D !345C !345C !234B !234B 
 4:07:16 USER	!123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A 
 4:07:17 USER	XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:17 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:17 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:17 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:17 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:17 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:17 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:17 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:17 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6) IJK5 ((((!456D !345C !345C 
 4:07:17 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:17 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) 
 4:07:18 USER	WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) 
 4:07:18 USER	STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:18 USER	WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C 
 4:07:18 USER	!234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:18 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 
 4:07:18 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:18 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:18 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B 
 4:07:18 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 
 4:07:18 USER	WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:18 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:18 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:19 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4) EFG3 (((((!567D 
 4:07:19 USER	!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
 4:07:19 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C 
 4:07:19 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:19 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) 
 4:07:19 USER	WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) 
 4:07:19 USER	STU0) QRS9) OPQ8 (((!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B 
 4:07:19 USER	!234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((
 4:07:19 USER	!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) 
 4:07:19 USER	UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 
 4:07:20 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:20 USER	UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A !123A XYZ3) WXY2 (
 4:07:20 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) 
 4:07:20 USER	QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 
 4:07:20 USER	UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 
 4:07:20 USER	WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 
 4:07:20 USER	STU0 QRS9) OPQ8) MNO7) KLM6) IJK5 ((((!456D !345C !345C !234B !234B !123A) 
 4:07:20 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 
 4:07:20 USER	WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A 
 4:07:20 USER	!123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:20 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:20 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:20 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:21 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:21 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:21 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:21 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 
 4:07:21 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:21 USER	UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 
 4:07:21 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 
 4:07:21 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 
 4:07:21 USER	STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4 ((((!345C !234B !234B !123A !123A 
 4:07:21 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:21 USER	WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:22 USER	XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A 
 4:07:22 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 
 4:07:22 USER	UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 XYZ3 
 4:07:22 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:22 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:22 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 
 4:07:22 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 
 4:07:22 USER	QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:22 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:22 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:22 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 
 4:07:23 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 
 4:07:23 USER	MNO7) KLM6) IJK5) GHI4) EFG3) CDE2 (((((!456D !345C !345C !234B !234B !123A) 
 4:07:23 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 
 4:07:24 USER	WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A 
 4:07:24 USER	!123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:24 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:24 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:24 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:24 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:25 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:25 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:25 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 
 4:07:25 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:25 USER	UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 
 4:07:25 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 
 4:07:25 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 
 4:07:25 USER	STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4 ((((!345C !234B !234B !123A !123A 
 4:07:25 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:25 USER	WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:25 USER	XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A 
 4:07:26 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 
 4:07:26 USER	UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 XYZ3 
 4:07:26 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:26 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:26 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 
 4:07:26 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 
 4:07:26 USER	QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:26 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:26 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:26 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 
 4:07:26 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 
 4:07:26 USER	MNO7) KLM6) IJK5) GHI4) EFG3 ((((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:27 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) 
 4:07:27 USER	OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) 
 4:07:27 USER	QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:27 USER	STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 
 4:07:27 USER	OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 
 4:07:27 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 
 4:07:27 USER	WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 
 4:07:27 USER	STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (
 4:07:27 USER	UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 MNO7) KLM6) 
 4:07:27 USER	IJK5) GHI4 (((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 
 4:07:27 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 
 4:07:27 USER	STU0 QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 
 4:07:28 USER	OPQ8 MNO7) KLM6) IJK5 ((UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 
 4:07:28 USER	OPQ8 OPQ8 MNO7) KLM6 (QRS9 OPQ8 OPQ8 MNO7 MNO7 KLM6) IJK5) GHI4) EFG3) CDE2) 
 4:07:28 USER	ABC1)
 4:07:28 USER	NIL
 4:07:28 USER	NIL
 4:07:29 USER	*** (TIMIT): base 1350401, length 182 words
 4:07:29 USER	TIMIT
 4:07:29 USER	Running tprint...
 4:07:29 USER	"Running tprint..."
 4:07:29 USER	
 4:07:29 USER	Timing performed on DEC-20
 4:07:29 USER	11-Mar-84 04:07:29 .
 4:07:29 USER	*** Garbage collection starting
 4:07:30 USER	*** GC 3: time 1115 ms, 24735 recovered, 248656 free
 4:07:30 USER	((((((!678E !567D !567D !456D !456D !345C) !234B (!567D !456D !456D !345C 
 4:07:31 USER	!345C !234B) !123A (!456D !345C !345C !234B !234B !123A) XYZ3) WXY2 ((!567D 
 4:07:31 USER	!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
 4:07:31 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C 
 4:07:31 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:31 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0) QRS9 (((!567D !456D !456D !345C !345C 
 4:07:31 USER	!234B) !123A (!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B 
 4:07:31 USER	!123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C !234B !234B !123A) XYZ3 (
 4:07:31 USER	!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) 
 4:07:31 USER	UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A 
 4:07:31 USER	XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9) OPQ8 (((
 4:07:31 USER	!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A 
 4:07:31 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B 
 4:07:32 USER	!123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:32 USER	XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:32 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) 
 4:07:32 USER	OPQ8) MNO7) KLM6 ((((!567D !456D !456D !345C !345C !234B) !123A (!456D !345C 
 4:07:32 USER	!345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) 
 4:07:32 USER	UVW1 ((!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B !234B !123A 
 4:07:32 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B 
 4:07:32 USER	!234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:32 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9) OPQ8 (((!456D !345C !345C !234B !234B 
 4:07:32 USER	!123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A 
 4:07:32 USER	XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:32 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:33 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:33 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:33 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:33 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:33 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:33 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:33 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6) IJK5 ((((!456D !345C !345C 
 4:07:33 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:33 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) 
 4:07:33 USER	WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) 
 4:07:34 USER	STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:34 USER	WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C 
 4:07:34 USER	!234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:34 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 
 4:07:34 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:34 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:34 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B 
 4:07:34 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 
 4:07:34 USER	WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:34 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:34 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:34 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4) EFG3 (((((!567D 
 4:07:35 USER	!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
 4:07:35 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2) UVW1 ((!456D !345C !345C 
 4:07:35 USER	!234B !234B !123A) XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B 
 4:07:35 USER	!123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) 
 4:07:35 USER	WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) 
 4:07:35 USER	STU0) QRS9) OPQ8 (((!456D !345C !345C !234B !234B !123A) XYZ3 (!345C !234B 
 4:07:35 USER	!234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1) STU0 ((
 4:07:35 USER	!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) 
 4:07:35 USER	UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 
 4:07:35 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:35 USER	UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A !123A XYZ3) WXY2 (
 4:07:36 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) 
 4:07:36 USER	QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 
 4:07:36 USER	UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 
 4:07:36 USER	WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 
 4:07:36 USER	STU0 QRS9) OPQ8) MNO7) KLM6) IJK5 ((((!456D !345C !345C !234B !234B !123A) 
 4:07:36 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 
 4:07:36 USER	WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A 
 4:07:36 USER	!123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:36 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:36 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:36 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:37 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:37 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:37 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:37 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 
 4:07:37 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:37 USER	UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 
 4:07:37 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 
 4:07:37 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 
 4:07:37 USER	STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4 ((((!345C !234B !234B !123A !123A 
 4:07:37 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:37 USER	WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:38 USER	XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A 
 4:07:38 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 
 4:07:38 USER	UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 XYZ3 
 4:07:38 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:38 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:38 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 
 4:07:38 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 
 4:07:38 USER	QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:38 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:38 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:39 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 
 4:07:39 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 
 4:07:39 USER	MNO7) KLM6) IJK5) GHI4) EFG3) CDE2 (((((!456D !345C !345C !234B !234B !123A) 
 4:07:39 USER	XYZ3 (!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 
 4:07:39 USER	WXY2) UVW1) STU0 ((!345C !234B !234B !123A !123A XYZ3) WXY2 (!234B !123A 
 4:07:39 USER	!123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0) QRS9 ((
 4:07:39 USER	!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:39 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8) MNO7 (((!345C !234B !234B !123A 
 4:07:39 USER	!123A XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 
 4:07:39 USER	WXY2 WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A 
 4:07:39 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((
 4:07:39 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (
 4:07:40 USER	WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 
 4:07:40 USER	XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:40 USER	UVW1 STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 
 4:07:40 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 
 4:07:40 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 
 4:07:40 USER	STU0 QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5) GHI4 ((((!345C !234B !234B !123A !123A 
 4:07:40 USER	XYZ3) WXY2 (!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 
 4:07:40 USER	WXY2 UVW1) STU0) QRS9 ((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (!123A XYZ3 
 4:07:40 USER	XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) OPQ8 ((!123A 
 4:07:40 USER	XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 
 4:07:40 USER	UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7) KLM6 (((!234B !123A !123A XYZ3 XYZ3 
 4:07:41 USER	WXY2) UVW1 (!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:41 USER	STU0) QRS9) OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 
 4:07:41 USER	UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 
 4:07:41 USER	UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 
 4:07:41 USER	QRS9 QRS9 OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (
 4:07:41 USER	XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) 
 4:07:41 USER	MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) 
 4:07:41 USER	OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 
 4:07:41 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 
 4:07:41 USER	MNO7) KLM6) IJK5) GHI4) EFG3 ((((!234B !123A !123A XYZ3 XYZ3 WXY2) UVW1 (
 4:07:41 USER	!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9) 
 4:07:41 USER	OPQ8 ((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) 
 4:07:42 USER	QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 WXY2 WXY2 UVW1 UVW1 
 4:07:42 USER	STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 
 4:07:42 USER	OPQ8) MNO7) KLM6) IJK5 (((!123A XYZ3 XYZ3 WXY2 WXY2 UVW1) STU0 (XYZ3 WXY2 
 4:07:42 USER	WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8) MNO7 ((XYZ3 
 4:07:42 USER	WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (UVW1 
 4:07:42 USER	STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 STU0 QRS9) OPQ8 (
 4:07:42 USER	UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 OPQ8 MNO7) KLM6) 
 4:07:42 USER	IJK5) GHI4 (((XYZ3 WXY2 WXY2 UVW1 UVW1 STU0) QRS9 (WXY2 UVW1 UVW1 STU0 STU0 
 4:07:42 USER	QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7) KLM6 ((WXY2 UVW1 UVW1 STU0 
 4:07:42 USER	STU0 QRS9) OPQ8 (UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 OPQ8 
 4:07:43 USER	OPQ8 MNO7) KLM6) IJK5 ((UVW1 STU0 STU0 QRS9 QRS9 OPQ8) MNO7 (STU0 QRS9 QRS9 
 4:07:43 USER	OPQ8 OPQ8 MNO7) KLM6 (QRS9 OPQ8 OPQ8 MNO7 MNO7 KLM6) IJK5) GHI4) EFG3) CDE2) 
 4:07:43 USER	ABC1)
 4:07:43 USER	
 4:07:43 USER	Cpu (- GC) Time = 4.351 secs
 4:07:43 USER	Elapsed Time = 12.0 secs
 4:07:43 USER	Wholine Time = 0.0
 4:07:43 USER	GC Time = 0.0 secs
 4:07:43 USER	Load Average Before  = 1.3
 4:07:43 USER	Load Average After   = 1.4
 4:07:43 USER	Average Load Average = 1.35
 4:07:43 USER	NIL
 4:07:43 USER	NIL
 4:07:43 USER	3 lisp> *(quit)
 4:07:43 MONTR	@@reset psl
 4:07:43 MONTR	@@systat
 4:07:43 MONTR	 Sun 11-Mar-84 04:07:43  Up 6 days 11:06:11
 4:07:43 MONTR	 3+5 Jobs   Load av (class Symb)   1.21   1.04   0.58
 4:07:43 MONTR	
 4:07:43 MONTR	 No operator in attendance
 4:07:43 MONTR	
 4:07:43 MONTR	 Job  Line Program  User              Foreign host
 4:07:43 MONTR	  16   142  Exec    RAMAZANKHANI
 4:07:43 MONTR	  29*  213  Systat  KESSLER
 4:07:43 MONTR	  37   145  Exec    KROHNFELDT
 4:07:44 MONTR	
 4:07:44 MONTR	   1   206  Dnload  OPERATOR
 4:07:44 MONTR	   2   207  Batcon  OPERATOR
 4:07:44 MONTR	   3   210  Netsrv  OPERATOR
 4:07:44 MONTR	   4   211  Unxftp  OPERATOR
 4:07:44 MONTR	   5   212  Mmailr  OPERATOR
 4:07:44 MONTR	@@psl:psl
 4:07:44 MONTR	[Keeping psl]
 4:07:47 USER	Utah PSL (3.2), 6-Mar-84 
 4:07:47 USER	1 lisp> *(off usermode)
 4:07:47 USER	NIL
 4:07:47 USER	2 lisp> *(dskin "time-traverse.sl")
 4:07:49 USER	NIL
 4:07:49 USER	NIL
 4:07:49 USER	NIL
 4:07:49 USER	NIL
 4:07:49 USER	NIL
 4:07:50 USER	NIL
 4:07:50 USER	NIL
 4:07:55 USER	*** (MARK): base 1345223, length 2 words
 4:07:55 USER	*** (ENTRY6): base 1345225, length 2 words
 4:07:55 USER	*** (ENTRY5): base 1345227, length 2 words
 4:07:55 USER	*** (ENTRY4): base 1345231, length 2 words
 4:07:55 USER	*** (ENTRY3): base 1345233, length 2 words
 4:07:56 USER	*** (ENTRY2): base 1345235, length 2 words
 4:07:56 USER	*** (ENTRY1): base 1345237, length 2 words
 4:07:56 USER	*** (SN): base 1345241, length 2 words
 4:07:56 USER	*** (SONS): base 1345243, length 2 words
 4:07:56 USER	*** (PARENTS): base 1345245, length 2 words
 4:07:56 USER	*** (MAKE-NODE): base 1345247, length 2 words
 4:07:56 USER	*** (ALTER-NODE): base 1345251, length 2 words
 4:07:56 USER	NODE
 4:07:56 USER	NIL
 4:07:56 USER	*** (SNB): base 1345253, length 4 words
 4:07:56 USER	SNB
 4:07:56 USER	0
 4:07:56 USER	*** (MOD): base 1345261, length 13 words
 4:07:56 USER	MOD
 4:07:56 USER	NIL
 4:07:56 USER	21
 4:07:56 USER	*** (SEED): base 1345305, length 4 words
 4:07:56 USER	SEED
 4:07:57 USER	*** (RANDOM): base 1345314, length 7 words
 4:07:57 USER	RANDOM
 4:07:57 USER	NIL
 4:07:58 USER	*** (REMOVE): base 1345326, length 67 words
 4:07:58 USER	REMOVE
 4:07:58 USER	*** (SELECT): base 1345434, length 11 words
 4:07:58 USER	SELECT
 4:07:59 USER	*** (ADD): base 1345447, length 26 words
 4:07:59 USER	ADD
 4:08:00 USER	*** (CREATE-STRUCTURE): base 1345516, length 116 words
 4:08:00 USER	CREATE-STRUCTURE
 4:08:01 USER	*** (FIND-ROOT): base 1345702, length 18 words
 4:08:01 USER	FIND-ROOT
 4:08:01 USER	NIL
 4:08:01 USER	NIL
 4:08:02 USER	*** (TRAVERS): base 1345732, length 65 words
 4:08:02 USER	TRAVERS
 4:08:02 USER	*** (TRAVERSE): base 1346040, length 17 words
 4:08:02 USER	TRAVERSE
 4:08:03 USER	NIL
 4:08:03 USER	NIL
 4:08:04 USER	*** (INIT-TIMIT): base 1346065, length 181 words
 4:08:04 USER	INIT-TIMIT
 4:08:04 USER	Traverse init Test.
 4:08:04 USER	"Traverse init Test."
 4:08:04 USER	
 4:08:04 USER	Timing performed on DEC-20
 4:08:04 USER	11-Mar-84 04:08:04 .
 4:08:04 USER	*** Garbage collection starting
 4:08:05 USER	*** GC 3: time 948 ms, 72519 recovered, 252607 free
 4:08:14 USER	
 4:08:14 USER	Cpu (- GC) Time = 7.842 secs
 4:08:14 USER	Elapsed Time = 8.0 secs
 4:08:14 USER	Wholine Time = 0.0
 4:08:14 USER	GC Time = 0.0 secs
 4:08:14 USER	Load Average Before  = 1.4
 4:08:14 USER	Load Average After   = 1.3
 4:08:14 USER	Average Load Average = 1.35
 4:08:14 USER	NIL
 4:08:15 USER	*** (TIMIT): base 1346355, length 197 words
 4:08:15 USER	TIMIT
 4:08:15 USER	Traverse Test.
 4:08:15 USER	"Traverse Test."
 4:08:15 USER	
 4:08:15 USER	Timing performed on DEC-20
 4:08:15 USER	11-Mar-84 04:08:15 .
 4:08:15 USER	*** Garbage collection starting
 4:08:19 USER	*** GC 4: time 3658 ms, 41768 recovered, 202583 free
 4:09:08 USER	
 4:09:08 USER	Cpu (- GC) Time = 43.226 secs
 4:09:08 USER	Elapsed Time = 48.0 secs
 4:09:08 USER	Wholine Time = 0.0
 4:09:08 USER	GC Time = 0.0 secs
 4:09:08 USER	Load Average Before  = 1.3
 4:09:08 USER	Load Average After   = 1.2
 4:09:08 USER	Average Load Average = 1.25
 4:09:08 USER	NIL
 4:09:08 USER	NIL
 4:09:08 USER	3 lisp> *(quit)
 4:09:08 MONTR	@@reset psl
 4:09:08 MONTR	@@systat
 4:09:08 MONTR	 Sun 11-Mar-84 04:09:08  Up 6 days 11:07:36
 4:09:08 MONTR	 3+5 Jobs   Load av (class Symb)   1.23   1.09   0.64
 4:09:08 MONTR	
 4:09:08 MONTR	 No operator in attendance
 4:09:08 MONTR	
 4:09:08 MONTR	 Job  Line Program  User              Foreign host
 4:09:08 MONTR	  16   142  Emacs   RAMAZANKHANI
 4:09:08 MONTR	  29*  213  Systat  KESSLER
 4:09:08 MONTR	  37   145  Exec    KROHNFELDT
 4:09:08 MONTR	
 4:09:08 MONTR	   1   206  Dnload  OPERATOR
 4:09:08 MONTR	   2   207  Batcon  OPERATOR
 4:09:08 MONTR	   3   210  Netsrv  OPERATOR
 4:09:09 MONTR	   4   211  Unxftp  OPERATOR
 4:09:09 MONTR	   5   212  Mmailr  OPERATOR
 4:09:09 MONTR	@@psl:psl
 4:09:09 MONTR	[Keeping psl]
 4:09:12 USER	Utah PSL (3.2), 6-Mar-84 
 4:09:12 USER	1 lisp> *(off usermode)
 4:09:12 USER	NIL
 4:09:12 USER	2 lisp> *(dskin "time-triang.sl")
 4:09:14 USER	NIL
 4:09:14 USER	NIL
 4:09:14 USER	NIL
 4:09:14 USER	NIL
 4:09:15 USER	[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1]
 4:09:15 USER	0
 4:09:15 USER	[0 0 0 0 0 0 0 0 0 0 0 0 0 0]
 4:09:15 USER	[1 2 4 3 5 6 1 3 6 2 5 4 11 12 13 7 8 4 4 7 11 8 12 13 6 10 15 9 14 13 
 4:09:15 USER	13 14 15 9 10 6]
 4:09:15 USER	[2 4 7 5 8 9 3 6 10 5 9 8 12 13 14 8 9 5 2 4 7 5 8 9 3 6 10 5 9 8 12 13 
 4:09:15 USER	14 8 9 5]
 4:09:15 USER	[4 7 11 8 12 13 6 10 15 9 14 13 13 14 15 9 10 6 1 2 4 3 5 6 1 3 6 2 5 4 
 4:09:15 USER	11 12 13 7 8 4]
 4:09:21 USER	*** (LAST-POSITION): base 1331625, length 17 words
 4:09:21 USER	LAST-POSITION
 4:09:22 USER	*** (TRY): base 1331655, length 113 words
 4:09:22 USER	TRY
 4:09:23 USER	*** (GOGOGO): base 1332041, length 14 words
 4:09:23 USER	GOGOGO
 4:09:24 USER	*** (TIMIT): base 1332062, length 179 words
 4:09:24 USER	TIMIT
 4:09:24 USER	*** (TEST): base 1332347, length 19 words
 4:09:24 USER	TEST
 4:09:24 USER	Triang test
 4:09:24 USER	"Triang test"
 4:09:24 USER	
 4:09:24 USER	Timing performed on DEC-20
 4:09:24 USER	11-Mar-84 04:09:24 .
 4:09:24 USER	*** Garbage collection starting
 4:09:25 USER	*** GC 3: time 865 ms, 39958 recovered, 253220 free
 4:10:56 USER	
 4:10:56 USER	Cpu (- GC) Time = 86.574 secs
 4:10:56 USER	Elapsed Time = 90.0 secs
 4:10:56 USER	Wholine Time = 0.0
 4:10:56 USER	GC Time = 0.0 secs
 4:10:56 USER	Load Average Before  = 1.2
 4:10:56 USER	Load Average After   = 1.1
 4:10:56 USER	Average Load Average = 1.15
 4:10:56 USER	NIL
 4:10:56 USER	NIL
 4:10:56 USER	3 lisp> *(quit)
 4:10:56 MONTR	@@reset psl
 4:10:56 MONTR	@@systat
 4:10:56 MONTR	 Sun 11-Mar-84 04:10:56  Up 6 days 11:09:24
 4:10:56 MONTR	 3+5 Jobs   Load av (class Symb)   1.10   1.09   0.69
 4:10:56 MONTR	
 4:10:56 MONTR	 No operator in attendance
 4:10:56 MONTR	
 4:10:56 MONTR	 Job  Line Program  User              Foreign host
 4:10:56 MONTR	  16   142  Emacs   RAMAZANKHANI
 4:10:57 MONTR	  29*  213  Systat  KESSLER
 4:10:57 MONTR	  37   145  Exec    KROHNFELDT
 4:10:57 MONTR	
 4:10:57 MONTR	   1   206  Dnload  OPERATOR
 4:10:57 MONTR	   2   207  Batcon  OPERATOR
 4:10:57 MONTR	   3   210  Netsrv  OPERATOR
 4:10:57 MONTR	   4   211  Unxftp  OPERATOR
 4:10:57 MONTR	   5   212  Mmailr  OPERATOR
 4:10:57 MONTR	@@psl:psl
 4:10:57 MONTR	[Keeping psl]
 4:11:00 USER	Utah PSL (3.2), 6-Mar-84 
 4:11:00 USER	1 lisp> *(off usermode)
 4:11:00 USER	NIL
 4:11:00 USER	2 lisp> *(dskin "time-tak.sl")
 4:11:02 USER	NIL
 4:11:02 USER	NIL
 4:11:03 USER	NIL
 4:11:03 USER	NIL
 4:11:03 USER	NIL
 4:11:03 USER	NIL
 4:11:08 USER	*** (TAK): base 1331177, length 29 words
 4:11:08 USER	TAK
 4:11:08 USER	NIL
 4:11:08 USER	NIL
 4:11:09 USER	*** (TIMIT): base 1331237, length 184 words
 4:11:09 USER	TIMIT
 4:11:10 USER	*** (NC-TIMIT): base 1331532, length 184 words
 4:11:10 USER	NC-TIMIT
 4:11:10 USER	TAK Test with 18 12 and 6
 4:11:10 USER	"TAK Test with 18 12 and 6"
 4:11:10 USER	
 4:11:10 USER	Timing performed on DEC-20
 4:11:10 USER	11-Mar-84 04:11:10 .
 4:11:10 USER	*** Garbage collection starting
 4:11:11 USER	*** GC 3: time 859 ms, 31705 recovered, 253480 free
 4:11:12 USER	
 4:11:12 USER	Cpu (- GC) Time = .468 secs
 4:11:12 USER	Elapsed Time = 1.0 secs
 4:11:12 USER	Wholine Time = 0.0
 4:11:12 USER	GC Time = 0.0 secs
 4:11:12 USER	Load Average Before  = 1.1
 4:11:12 USER	Load Average After   = 1.1
 4:11:12 USER	Average Load Average = 1.1
 4:11:12 USER	NIL
 4:11:12 USER	TAK Test with 10018 10012 and 10006
 4:11:12 USER	"TAK Test with 10018 10012 and 10006"
 4:11:12 USER	
 4:11:12 USER	Timing performed on DEC-20
 4:11:12 USER	11-Mar-84 04:11:12 .
 4:11:12 USER	*** Garbage collection starting
 4:11:13 USER	*** GC 4: time 818 ms, 177 recovered, 253480 free
 4:11:14 USER	
 4:11:14 USER	Cpu (- GC) Time = .49 secs
 4:11:14 USER	Elapsed Time = 0.0 secs
 4:11:14 USER	Wholine Time = 0.0
 4:11:14 USER	GC Time = 0.0 secs
 4:11:14 USER	Load Average Before  = 1.1
 4:11:14 USER	Load Average After   = 1.1
 4:11:14 USER	Average Load Average = 1.1
 4:11:14 USER	NIL
 4:11:14 USER	NIL
 4:11:14 USER	*** (LISTN): base 1332025, length 12 words
 4:11:14 USER	LISTN
 4:11:14 USER	*** (MAS): base 1332046, length 35 words
 4:11:14 USER	MAS
 4:11:15 USER	*** (SHORTERP): base 1332111, length 15 words
 4:11:15 USER	SHORTERP
 4:11:15 USER	NIL
 4:11:15 USER	NIL
 4:11:15 USER	NIL
 4:11:15 USER	(18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1)
 4:11:15 USER	(12 11 10 9 8 7 6 5 4 3 2 1)
 4:11:15 USER	(6 5 4 3 2 1)
 4:11:16 USER	*** (TIMIT): base 1332136, length 184 words
 4:11:16 USER	TIMIT
 4:11:16 USER	TAKL - TAK with lists
 4:11:16 USER	"TAKL - TAK with lists"
 4:11:16 USER	
 4:11:16 USER	Timing performed on DEC-20
 4:11:16 USER	11-Mar-84 04:11:16 .
 4:11:16 USER	*** Garbage collection starting
 4:11:17 USER	*** GC 5: time 830 ms, 20739 recovered, 253396 free
 4:11:20 USER	
 4:11:20 USER	Cpu (- GC) Time = 2.521 secs
 4:11:20 USER	Elapsed Time = 3.0 secs
 4:11:20 USER	Wholine Time = 0.0
 4:11:20 USER	GC Time = 0.0 secs
 4:11:20 USER	Load Average Before  = 1.1
 4:11:20 USER	Load Average After   = 1.1
 4:11:20 USER	Average Load Average = 1.1
 4:11:20 USER	NIL
 4:11:20 USER	NIL
 4:11:21 USER	*** (TIMIT): base 1332430, length 184 words
 4:11:21 USER	TIMIT
 4:11:21 USER	NIL
 4:11:22 USER	NIL
 4:11:23 USER	*** (TAK0): base 1333360, length 30 words
 4:11:23 USER	TAK0
 4:11:23 USER	*** (TAK1): base 1333416, length 30 words
 4:11:23 USER	TAK1
 4:11:24 USER	*** (TAK2): base 1333454, length 30 words
 4:11:24 USER	TAK2
 4:11:24 USER	*** (TAK3): base 1333512, length 30 words
 4:11:24 USER	TAK3
 4:11:24 USER	*** (TAK4): base 1333550, length 30 words
 4:11:24 USER	TAK4
 4:11:25 USER	*** (TAK5): base 1333606, length 30 words
 4:11:25 USER	TAK5
 4:11:25 USER	*** (TAK6): base 1333644, length 30 words
 4:11:25 USER	TAK6
 4:11:25 USER	*** (TAK7): base 1333702, length 30 words
 4:11:25 USER	TAK7
 4:11:26 USER	*** (TAK8): base 1333740, length 30 words
 4:11:26 USER	TAK8
 4:11:26 USER	*** (TAK9): base 1333776, length 30 words
 4:11:26 USER	TAK9
 4:11:27 USER	*** (TAK10): base 1334034, length 30 words
 4:11:27 USER	TAK10
 4:11:27 USER	*** (TAK11): base 1334072, length 30 words
 4:11:27 USER	TAK11
 4:11:27 USER	*** (TAK12): base 1334130, length 30 words
 4:11:27 USER	TAK12
 4:11:28 USER	*** (TAK13): base 1334166, length 30 words
 4:11:28 USER	TAK13
 4:11:28 USER	*** (TAK14): base 1334224, length 30 words
 4:11:28 USER	TAK14
 4:11:28 USER	*** (TAK15): base 1334262, length 30 words
 4:11:29 USER	TAK15
 4:11:29 USER	*** (TAK16): base 1334320, length 30 words
 4:11:29 USER	TAK16
 4:11:29 USER	*** (TAK17): base 1334356, length 30 words
 4:11:29 USER	TAK17
 4:11:29 USER	*** (TAK18): base 1334414, length 30 words
 4:11:29 USER	TAK18
 4:11:29 USER	*** (TAK19): base 1334452, length 30 words
 4:11:30 USER	TAK19
 4:11:30 USER	*** (TAK20): base 1334510, length 30 words
 4:11:30 USER	TAK20
 4:11:31 USER	*** (TAK21): base 1334546, length 30 words
 4:11:31 USER	TAK21
 4:11:31 USER	*** (TAK22): base 1334604, length 30 words
 4:11:31 USER	TAK22
 4:11:31 USER	*** (TAK23): base 1334642, length 30 words
 4:11:31 USER	TAK23
 4:11:32 USER	*** (TAK24): base 1334700, length 30 words
 4:11:32 USER	TAK24
 4:11:32 USER	*** (TAK25): base 1334736, length 30 words
 4:11:32 USER	TAK25
 4:11:32 USER	*** (TAK26): base 1334774, length 30 words
 4:11:32 USER	TAK26
 4:11:33 USER	*** (TAK27): base 1335032, length 30 words
 4:11:33 USER	TAK27
 4:11:33 USER	*** (TAK28): base 1335070, length 30 words
 4:11:33 USER	TAK28
 4:11:34 USER	*** (TAK29): base 1335126, length 30 words
 4:11:34 USER	TAK29
 4:11:34 USER	*** (TAK30): base 1335164, length 30 words
 4:11:34 USER	TAK30
 4:11:34 USER	*** (TAK31): base 1335222, length 30 words
 4:11:34 USER	TAK31
 4:11:35 USER	*** (TAK32): base 1335260, length 30 words
 4:11:35 USER	TAK32
 4:11:35 USER	*** (TAK33): base 1335316, length 30 words
 4:11:35 USER	TAK33
 4:11:35 USER	*** (TAK34): base 1335354, length 30 words
 4:11:35 USER	TAK34
 4:11:36 USER	*** (TAK35): base 1335412, length 30 words
 4:11:36 USER	TAK35
 4:11:36 USER	*** (TAK36): base 1335450, length 30 words
 4:11:36 USER	TAK36
 4:11:36 USER	*** (TAK37): base 1335506, length 30 words
 4:11:37 USER	TAK37
 4:11:37 USER	*** (TAK38): base 1335544, length 30 words
 4:11:37 USER	TAK38
 4:11:37 USER	*** (TAK39): base 1335602, length 30 words
 4:11:37 USER	TAK39
 4:11:37 USER	*** (TAK40): base 1335640, length 30 words
 4:11:38 USER	TAK40
 4:11:38 USER	*** (TAK41): base 1335676, length 30 words
 4:11:38 USER	TAK41
 4:11:38 USER	*** (TAK42): base 1335734, length 30 words
 4:11:38 USER	TAK42
 4:11:39 USER	*** (TAK43): base 1335772, length 30 words
 4:11:39 USER	TAK43
 4:11:39 USER	*** (TAK44): base 1336030, length 30 words
 4:11:39 USER	TAK44
 4:11:39 USER	*** (TAK45): base 1336066, length 30 words
 4:11:39 USER	TAK45
 4:11:39 USER	*** (TAK46): base 1336124, length 30 words
 4:11:39 USER	TAK46
 4:11:40 USER	*** (TAK47): base 1336162, length 30 words
 4:11:40 USER	TAK47
 4:11:40 USER	*** (TAK48): base 1336220, length 30 words
 4:11:40 USER	TAK48
 4:11:40 USER	*** (TAK49): base 1336256, length 30 words
 4:11:40 USER	TAK49
 4:11:41 USER	*** (TAK50): base 1336314, length 30 words
 4:11:41 USER	TAK50
 4:11:41 USER	*** (TAK51): base 1336352, length 30 words
 4:11:41 USER	TAK51
 4:11:41 USER	*** (TAK52): base 1336410, length 30 words
 4:11:41 USER	TAK52
 4:11:42 USER	*** (TAK53): base 1336446, length 30 words
 4:11:42 USER	TAK53
 4:11:42 USER	*** (TAK54): base 1336504, length 30 words
 4:11:42 USER	TAK54
 4:11:44 USER	*** (TAK55): base 1336542, length 30 words
 4:11:44 USER	TAK55
 4:11:44 USER	*** (TAK56): base 1336600, length 30 words
 4:11:44 USER	TAK56
 4:11:44 USER	*** (TAK57): base 1336636, length 30 words
 4:11:44 USER	TAK57
 4:11:45 USER	*** (TAK58): base 1336674, length 30 words
 4:11:45 USER	TAK58
 4:11:45 USER	*** (TAK59): base 1336732, length 30 words
 4:11:45 USER	TAK59
 4:11:45 USER	*** (TAK60): base 1336770, length 30 words
 4:11:45 USER	TAK60
 4:11:46 USER	*** (TAK61): base 1337026, length 30 words
 4:11:46 USER	TAK61
 4:11:46 USER	*** (TAK62): base 1337064, length 30 words
 4:11:46 USER	TAK62
 4:11:46 USER	*** (TAK63): base 1337122, length 30 words
 4:11:46 USER	TAK63
 4:11:47 USER	*** (TAK64): base 1337160, length 30 words
 4:11:47 USER	TAK64
 4:11:47 USER	*** Garbage collection starting
 4:11:49 USER	*** GC 6: time 1135 ms, 252723 recovered, 252724 free
 4:11:49 USER	*** (TAK65): base 1337216, length 30 words
 4:11:49 USER	TAK65
 4:11:49 USER	*** (TAK66): base 1337254, length 30 words
 4:11:49 USER	TAK66
 4:11:49 USER	*** (TAK67): base 1337312, length 30 words
 4:11:49 USER	TAK67
 4:11:50 USER	*** (TAK68): base 1337350, length 30 words
 4:11:50 USER	TAK68
 4:11:50 USER	*** (TAK69): base 1337406, length 30 words
 4:11:50 USER	TAK69
 4:11:51 USER	*** (TAK70): base 1337444, length 30 words
 4:11:51 USER	TAK70
 4:11:51 USER	*** (TAK71): base 1337502, length 30 words
 4:11:51 USER	TAK71
 4:11:51 USER	*** (TAK72): base 1337540, length 30 words
 4:11:51 USER	TAK72
 4:11:52 USER	*** (TAK73): base 1337576, length 30 words
 4:11:52 USER	TAK73
 4:11:52 USER	*** (TAK74): base 1337634, length 30 words
 4:11:52 USER	TAK74
 4:11:52 USER	*** (TAK75): base 1337672, length 30 words
 4:11:52 USER	TAK75
 4:11:53 USER	*** (TAK76): base 1337730, length 30 words
 4:11:53 USER	TAK76
 4:11:53 USER	*** (TAK77): base 1337766, length 30 words
 4:11:53 USER	TAK77
 4:11:53 USER	*** (TAK78): base 1340024, length 30 words
 4:11:53 USER	TAK78
 4:11:54 USER	*** (TAK79): base 1340062, length 30 words
 4:11:54 USER	TAK79
 4:11:54 USER	*** (TAK80): base 1340120, length 30 words
 4:11:54 USER	TAK80
 4:11:54 USER	*** (TAK81): base 1340156, length 30 words
 4:11:54 USER	TAK81
 4:11:54 USER	*** (TAK82): base 1340214, length 30 words
 4:11:55 USER	TAK82
 4:11:55 USER	*** (TAK83): base 1340252, length 30 words
 4:11:55 USER	TAK83
 4:11:56 USER	*** (TAK84): base 1340310, length 30 words
 4:11:56 USER	TAK84
 4:11:56 USER	*** (TAK85): base 1340346, length 30 words
 4:11:56 USER	TAK85
 4:11:57 USER	*** (TAK86): base 1340404, length 30 words
 4:11:57 USER	TAK86
 4:11:57 USER	*** (TAK87): base 1340442, length 30 words
 4:11:57 USER	TAK87
 4:11:57 USER	*** (TAK88): base 1340500, length 30 words
 4:11:57 USER	TAK88
 4:11:58 USER	*** (TAK89): base 1340536, length 30 words
 4:11:58 USER	TAK89
 4:11:58 USER	*** (TAK90): base 1340574, length 30 words
 4:11:58 USER	TAK90
 4:11:58 USER	*** (TAK91): base 1340632, length 30 words
 4:11:58 USER	TAK91
 4:11:59 USER	*** (TAK92): base 1340670, length 30 words
 4:11:59 USER	TAK92
 4:11:59 USER	*** (TAK93): base 1340726, length 30 words
 4:11:59 USER	TAK93
 4:11:59 USER	*** (TAK94): base 1340764, length 30 words
 4:11:59 USER	TAK94
 4:12:00 USER	*** (TAK95): base 1341022, length 30 words
 4:12:00 USER	TAK95
 4:12:00 USER	*** (TAK96): base 1341060, length 30 words
 4:12:00 USER	TAK96
 4:12:00 USER	*** (TAK97): base 1341116, length 30 words
 4:12:00 USER	TAK97
 4:12:01 USER	*** (TAK98): base 1341154, length 30 words
 4:12:01 USER	TAK98
 4:12:01 USER	*** (TAK99): base 1341212, length 30 words
 4:12:01 USER	TAK99
 4:12:01 USER	NIL
 4:12:01 USER	TAK - Gross Version with Lots of functions
 4:12:01 USER	"TAK - Gross Version with Lots of functions"
 4:12:01 USER	
 4:12:01 USER	Timing performed on DEC-20
 4:12:01 USER	11-Mar-84 04:12:01 .
 4:12:01 USER	*** Garbage collection starting
 4:12:03 USER	*** GC 7: time 936 ms, 126916 recovered, 253396 free
 4:12:03 USER	
 4:12:03 USER	Cpu (- GC) Time = .609 secs
 4:12:03 USER	Elapsed Time = 1.0 secs
 4:12:03 USER	Wholine Time = 0.0
 4:12:03 USER	GC Time = 0.0 secs
 4:12:03 USER	Load Average Before  = 1.1
 4:12:04 USER	Load Average After   = 1.1
 4:12:04 USER	Average Load Average = 1.1
 4:12:04 USER	NIL
 4:12:04 USER	NIL
 4:12:04 USER	NIL
 4:12:04 USER	*** (TAK): base 1341252, length 9 words
 4:12:04 USER	TAK
 4:12:04 USER	*** (STAK): base 1341263, length 54 words
 4:12:04 USER	STAK
 4:12:04 USER	NIL
 4:12:04 USER	NIL
 4:12:04 USER	NIL
 4:12:05 USER	*** (TIMIT): base 1341351, length 184 words
 4:12:05 USER	TIMIT
 4:12:05 USER	STAK - TAK using fluid binding
 4:12:05 USER	"STAK - TAK using fluid binding"
 4:12:05 USER	
 4:12:05 USER	Timing performed on DEC-20
 4:12:05 USER	11-Mar-84 04:12:05 .
 4:12:05 USER	*** Garbage collection starting
 4:12:06 USER	*** GC 8: time 852 ms, 18771 recovered, 253396 free
 4:12:09 USER	
 4:12:09 USER	Cpu (- GC) Time = 2.682 secs
 4:12:09 USER	Elapsed Time = 3.0 secs
 4:12:09 USER	Wholine Time = 0.0
 4:12:09 USER	GC Time = 0.0 secs
 4:12:09 USER	Load Average Before  = 1.1
 4:12:09 USER	Load Average After   = 1.1
 4:12:09 USER	Average Load Average = 1.1
 4:12:09 USER	NIL
 4:12:09 USER	NIL
 4:12:09 USER	NIL
 4:12:09 USER	*** (TAK): base 1341641, length 21 words
 4:12:09 USER	TAK
 4:12:10 USER	*** (TAK1): base 1341666, length 60 words
 4:12:10 USER	TAK1
 4:12:10 USER	NIL
 4:12:10 USER	NIL
 4:12:11 USER	*** (TIMIT): base 1341762, length 184 words
 4:12:11 USER	TIMIT
 4:12:11 USER	CTAK - TAK with catch and throw
 4:12:11 USER	"CTAK - TAK with catch and throw"
 4:12:11 USER	
 4:12:11 USER	Timing performed on DEC-20
 4:12:11 USER	11-Mar-84 04:12:11 .
 4:12:11 USER	*** Garbage collection starting
 4:12:12 USER	*** GC 9: time 837 ms, 23681 recovered, 253396 free
 4:12:15 USER	
 4:12:15 USER	Cpu (- GC) Time = 2.958 secs
 4:12:15 USER	Elapsed Time = 3.0 secs
 4:12:15 USER	Wholine Time = 0.0
 4:12:15 USER	GC Time = 0.0 secs
 4:12:15 USER	Load Average Before  = 1.1
 4:12:15 USER	Load Average After   = 1.1
 4:12:16 USER	Average Load Average = 1.1
 4:12:16 USER	NIL
 4:12:16 USER	NIL
 4:12:16 USER	3 lisp> *(quit)
 4:12:16 MONTR	@@reset psl
 4:12:16 MONTR	@@systat
 4:12:16 MONTR	 Sun 11-Mar-84 04:12:16  Up 6 days 11:10:43
 4:12:16 MONTR	 3+5 Jobs   Load av (class Symb)   1.08   1.08   0.72
 4:12:16 MONTR	
 4:12:16 MONTR	 No operator in attendance
 4:12:16 MONTR	
 4:12:16 MONTR	 Job  Line Program  User              Foreign host
 4:12:16 MONTR	  16   142  Emacs   RAMAZANKHANI
 4:12:16 MONTR	  29*  213  Systat  KESSLER
 4:12:16 MONTR	  37   145  Exec    KROHNFELDT
 4:12:16 MONTR	
 4:12:16 MONTR	   1   206  Dnload  OPERATOR
 4:12:16 MONTR	   2   207  Batcon  OPERATOR
 4:12:16 MONTR	   3   210  Netsrv  OPERATOR
 4:12:16 MONTR	   4   211  Unxftp  OPERATOR
 4:12:16 MONTR	   5   212  Mmailr  OPERATOR
 4:12:16 MONTR	@@psl:psl
 4:12:16 MONTR	[Keeping psl]
 4:12:21 USER	Utah PSL (3.2), 6-Mar-84 
 4:12:21 USER	1 lisp> *(off usermode)
 4:12:22 USER	NIL
 4:12:22 USER	2 lisp> *(dskin "time-frpoly.sl")
 4:12:24 USER	NIL
 4:12:24 USER	NIL
 4:12:24 USER	NIL
 4:12:24 USER	NIL
 4:12:29 USER	*** (POINTERGP): base 1331254, length 35 words
 4:12:29 USER	POINTERGP
 4:12:30 USER	*** (PCOEFP): base 1331322, length 8 words
 4:12:30 USER	PCOEFP
 4:12:30 USER	*** (PZEROP): base 1331335, length 8 words
 4:12:30 USER	PZEROP
 4:12:30 USER	*** (PZERO): base 1331350, length 4 words
 4:12:30 USER	PZERO
 4:12:30 USER	*** (CPLUS): base 1331357, length 13 words
 4:12:30 USER	CPLUS
 4:12:30 USER	*** (CTIMES): base 1331377, length 13 words
 4:12:30 USER	CTIMES
 4:12:30 USER	*** (PCOEFADD): base 1331417, length 19 words
 4:12:30 USER	PCOEFADD
 4:12:31 USER	*** (PCPLUS): base 1331453, length 17 words
 4:12:31 USER	PCPLUS
 4:12:31 USER	*** (PCPLUS1): base 1331477, length 43 words
 4:12:31 USER	PCPLUS1
 4:12:31 USER	*** (PCTIMES): base 1331560, length 15 words
 4:12:31 USER	PCTIMES
 4:12:32 USER	*** (PCTIMES1): base 1331602, length 25 words
 4:12:32 USER	PCTIMES1
 4:12:32 USER	*** (PPLUS): base 1331636, length 60 words
 4:12:32 USER	PPLUS
 4:12:33 USER	*** (PPLUS1): base 1331732, length 70 words
 4:12:33 USER	PPLUS1
 4:12:33 USER	*** (PSIMP): base 1332040, length 27 words
 4:12:33 USER	PSIMP
 4:12:34 USER	*** (PTIMES): base 1332076, length 77 words
 4:12:34 USER	PTIMES
 4:12:34 USER	*** (PTIMES1): base 1332221, length 28 words
 4:12:34 USER	PTIMES1
 4:12:35 USER	*** (PTIMES2): base 1332255, length 31 words
 4:12:35 USER	PTIMES2
 4:12:37 USER	*** (PTIMES3): base 1332317, length 192 words
 4:12:37 USER	PTIMES3
 4:12:37 USER	*** (PEXPTSQ): base 1332622, length 44 words
 4:12:37 USER	PEXPTSQ
 4:12:37 USER	*** (SETUP): base 1332701, length 55 words
 4:12:37 USER	SETUP
 4:12:37 USER	NIL
 4:12:38 USER	*** (TIMIT1): base 1332773, length 184 words
 4:12:38 USER	TIMIT1
 4:12:39 USER	*** (TIMIT2): base 1333266, length 184 words
 4:12:39 USER	TIMIT2
 4:12:40 USER	*** (TIMIT3): base 1333561, length 184 words
 4:12:40 USER	TIMIT3
 4:12:40 USER	*** (BENCH): base 1334065, length 18 words
 4:12:40 USER	BENCH
 4:12:42 USER	SetupGlobals
 4:12:42 USER	 ... done
 4:12:42 USER	NIL
 4:12:42 USER	FPOLY
 4:12:42 USER	"FPOLY"
 4:12:42 USER	(Z 1 1.0 0 (Y 1 1.0 0 (X 1 1.0 0 1.0)))
 4:12:42 USER	(bench 2)
 4:12:42 USER	"(bench 2)"
 4:12:42 USER	TEST1
 4:12:42 USER	
 4:12:42 USER	Timing performed on DEC-20
 4:12:42 USER	11-Mar-84 04:12:42 .
 4:12:42 USER	*** Garbage collection starting
 4:12:43 USER	*** GC 3: time 1005 ms, 128995 recovered, 252903 free
 4:12:43 USER	
 4:12:43 USER	Cpu (- GC) Time = .023 secs
 4:12:43 USER	Elapsed Time = 0.0 secs
 4:12:43 USER	Wholine Time = 0.0
 4:12:43 USER	GC Time = 0.0 secs
 4:12:43 USER	Load Average Before  = 1.1
 4:12:43 USER	Load Average After   = 1.1
 4:12:43 USER	Average Load Average = 1.1
 4:12:43 USER	TEST2
 4:12:43 USER	
 4:12:43 USER	Timing performed on DEC-20
 4:12:43 USER	11-Mar-84 04:12:43 .
 4:12:43 USER	*** Garbage collection starting
 4:12:45 USER	*** GC 4: time 890 ms, 410 recovered, 252903 free
 4:12:45 USER	
 4:12:45 USER	Cpu (- GC) Time = .039 secs
 4:12:45 USER	Elapsed Time = 0.0 secs
 4:12:45 USER	Wholine Time = 0.0
 4:12:45 USER	GC Time = 0.0 secs
 4:12:45 USER	Load Average Before  = 1.1
 4:12:45 USER	Load Average After   = 1.1
 4:12:45 USER	Average Load Average = 1.1
 4:12:45 USER	TEST3
 4:12:45 USER	
 4:12:45 USER	Timing performed on DEC-20
 4:12:45 USER	11-Mar-84 04:12:45 .
 4:12:45 USER	*** Garbage collection starting
 4:12:46 USER	*** GC 5: time 867 ms, 836 recovered, 252903 free
 4:12:46 USER	
 4:12:46 USER	Cpu (- GC) Time = .025 secs
 4:12:46 USER	Elapsed Time = 0.0 secs
 4:12:46 USER	Wholine Time = 0.0
 4:12:46 USER	GC Time = 0.0 secs
 4:12:46 USER	Load Average Before  = 1.1
 4:12:46 USER	Load Average After   = 1.1
 4:12:46 USER	Average Load Average = 1.1
 4:12:46 USER	NIL
 4:12:46 USER	(bench 5)
 4:12:46 USER	"(bench 5)"
 4:12:46 USER	TEST1
 4:12:46 USER	
 4:12:46 USER	Timing performed on DEC-20
 4:12:46 USER	11-Mar-84 04:12:46 .
 4:12:46 USER	*** Garbage collection starting
 4:12:46 USER	*** GC 6: time 876 ms, 544 recovered, 252903 free
 4:12:48 USER	
 4:12:48 USER	Cpu (- GC) Time = .093 secs
 4:12:48 USER	Elapsed Time = 1.0 secs
 4:12:48 USER	Wholine Time = 0.0
 4:12:48 USER	GC Time = 0.0 secs
 4:12:48 USER	Load Average Before  = 1.1
 4:12:48 USER	Load Average After   = 1.1
 4:12:48 USER	Average Load Average = 1.1
 4:12:48 USER	TEST2
 4:12:48 USER	
 4:12:48 USER	Timing performed on DEC-20
 4:12:48 USER	11-Mar-84 04:12:48 .
 4:12:48 USER	*** Garbage collection starting
 4:12:49 USER	*** GC 7: time 867 ms, 2138 recovered, 252903 free
 4:12:49 USER	
 4:12:49 USER	Cpu (- GC) Time = .425 secs
 4:12:49 USER	Elapsed Time = 0.0 secs
 4:12:50 USER	Wholine Time = 0.0
 4:12:50 USER	GC Time = 0.0 secs
 4:12:50 USER	Load Average Before  = 1.1
 4:12:50 USER	Load Average After   = 1.1
 4:12:50 USER	Average Load Average = 1.1
 4:12:50 USER	TEST3
 4:12:50 USER	
 4:12:50 USER	Timing performed on DEC-20
 4:12:50 USER	11-Mar-84 04:12:49 .
 4:12:50 USER	*** Garbage collection starting
 4:12:51 USER	*** GC 8: time 876 ms, 9594 recovered, 252903 free
 4:12:51 USER	
 4:12:51 USER	Cpu (- GC) Time = .136 secs
 4:12:51 USER	Elapsed Time = 1.0 secs
 4:12:51 USER	Wholine Time = 0.0
 4:12:51 USER	GC Time = 0.0 secs
 4:12:51 USER	Load Average Before  = 1.1
 4:12:51 USER	Load Average After   = 1.1
 4:12:51 USER	Average Load Average = 1.1
 4:12:51 USER	NIL
 4:12:51 USER	(bench 10)
 4:12:51 USER	"(bench 10)"
 4:12:51 USER	TEST1
 4:12:51 USER	
 4:12:51 USER	Timing performed on DEC-20
 4:12:51 USER	11-Mar-84 04:12:51 .
 4:12:51 USER	*** Garbage collection starting
 4:12:52 USER	*** GC 9: time 888 ms, 3410 recovered, 252903 free
 4:12:54 USER	
 4:12:54 USER	Cpu (- GC) Time = .936 secs
 4:12:54 USER
-------

∂23-Mar-84  2335	GALWAY@UTAH-20.ARPA 	Latest PSL times on DEC-20   
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 23 Mar 84  23:34:36 PST
Date: Sat 24 Mar 84 00:34:54-MST
From: William Galway <Galway@UTAH-20.ARPA>
Subject: Latest PSL times on DEC-20
To: pw@SU-AI.ARPA
cc: RPG@SU-AI.ARPA

Here are the newly collected (and complete) timings for PSL on
our DEC-20.  I'm going to try collecting the Vax timings this
weekend, I'll defer sending the source files until I try them out
there (assuming that's OK).  Hope this layout is OK, I've deleted
lots of stuff from the log file, tried to make it "pretty".  I've
suppressed the printing of the "wholine" time.  (What is wholine
time anyway?  I'd be glad to include it if appropriate and I can
figure out how to measure it.)

------------------------------------------------------------
Boyer Test
Timing performed on DEC-20
23-Mar-84 05:03:37 .
*** Garbage collection starting
*** GC 4: time 4833 ms, 179307 recovered, 179307 free
*** Garbage collection starting
*** GC 5: time 6772 ms, 140848 recovered, 140849 free
........................................
Cpu (- GC) Time = 11.96 secs
Elapsed Time = 27.0 secs
GC Time = 11.605 secs
Load Average Before  = 1.2
Load Average After   = 1.1
Average Load Average = 1.15
------------------------------------------------------------
------------------------------------------------------------
browse
Timing performed on DEC-20
23-Mar-84 05:04:29 .
*** Garbage collection starting
*** GC 4: time 1509 ms, 244799 recovered, 244799 free
*** Garbage collection starting
*** GC 5: time 1688 ms, 244834 recovered, 244835 free
*** Garbage collection starting
*** GC 6: time 1699 ms, 244796 recovered, 244797 free
........................................
Cpu (- GC) Time = 23.819 secs
Elapsed Time = 31.0 secs
GC Time = 4.896 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
Deriv Test.
Timing performed on DEC-20
24-Mar-84 00:19:43 .
*** Garbage collection starting
*** GC 4: time 1235 ms, 253393 recovered, 253393 free
........................................
Cpu (- GC) Time = 5.643 secs
Elapsed Time = 10.0 secs
GC Time = 1.235 secs
Load Average Before  = .8
Load Average After   = .8
Average Load Average = .8
------------------------------------------------------------
------------------------------------------------------------
DDeriv Test, also same as FDDeriv
Timing performed on DEC-20
23-Mar-84 05:05:19 .
*** Garbage collection starting
*** GC 4: time 1035 ms, 253407 recovered, 253407 free
*** Garbage collection starting
*** GC 5: time 1035 ms, 253448 recovered, 253449 free
........................................
Cpu (- GC) Time = 6.006 secs
Elapsed Time = 9.0 secs
GC Time = 2.07 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
`Destructive' Test
Timing performed on DEC-20
23-Mar-84 05:06:02 .
........................................
Cpu (- GC) Time = 2.381 secs
Elapsed Time = 2.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
Div test 1, iterative version of div
Timing performed on DEC-20
23-Mar-84 05:06:22 .
........................................
Cpu (- GC) Time = 2.3 secs
Elapsed Time = 2.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
Div test 2, recursive version of div
Timing performed on DEC-20
23-Mar-84 05:06:26 .
........................................
Cpu (- GC) Time = 2.343 secs
Elapsed Time = 3.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
FFT Test
Timing performed on DEC-20
23-Mar-84 05:06:52 .
*** Garbage collection starting
*** GC 4: time 1401 ms, 244956 recovered, 244958 free
*** Garbage collection starting
*** GC 5: time 1815 ms, 244947 recovered, 244949 free
*** Garbage collection starting
*** GC 6: time 1362 ms, 244950 recovered, 244952 free
*** Garbage collection starting
*** GC 7: time 1337 ms, 244947 recovered, 244949 free
*** Garbage collection starting
*** GC 8: time 1421 ms, 244956 recovered, 244958 free
*** Garbage collection starting
*** GC 9: time 1532 ms, 244950 recovered, 244952 free
*** Garbage collection starting
*** GC 10: time 1897 ms, 244950 recovered, 244952 free
........................................
Cpu (- GC) Time = 35.445 secs
Elapsed Time = 51.0 secs
GC Time = 10.765 secs
Load Average Before  = 1.1
Load Average After   = 1.0
Average Load Average = 1.05
------------------------------------------------------------
------------------------------------------------------------
Fprint Test
Timing performed on DEC-20
23-Mar-84 05:08:21 .
........................................
Cpu (- GC) Time = 4.778 secs
Elapsed Time = 6.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.3
Load Average After   = 1.3
Average Load Average = 1.3
------------------------------------------------------------
------------------------------------------------------------
fread test
Timing performed on DEC-20
24-Mar-84 00:20:13 .
........................................
Cpu (- GC) Time = 5.829 secs
Elapsed Time = 7.0 secs
GC Time = 0.0 secs
Load Average Before  = .9
Load Average After   = .9
Average Load Average = .9
------------------------------------------------------------
------------------------------------------------------------
Puzzle test
Timing performed on DEC-20
23-Mar-84 05:09:07 .
success in 2005 trials
........................................
Cpu (- GC) Time = 15.924 secs
Elapsed Time = 18.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
tprint test
Timing performed on DEC-20
23-Mar-84 05:09:58 .
........................................
Cpu (- GC) Time = 4.253 secs
Elapsed Time = 12.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.4
Load Average After   = 1.4
Average Load Average = 1.4
------------------------------------------------------------
------------------------------------------------------------
Traverse init Test
Timing performed on DEC-20
23-Mar-84 05:10:38 .
........................................
Cpu (- GC) Time = 7.596 secs
Elapsed Time = 8.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.3
Load Average After   = 1.3
Average Load Average = 1.3
------------------------------------------------------------
------------------------------------------------------------
Traverse Test
Timing performed on DEC-20
23-Mar-84 05:10:52 .
........................................
Cpu (- GC) Time = 43.885 secs
Elapsed Time = 49.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.3
Load Average After   = 1.2
Average Load Average = 1.25
------------------------------------------------------------
------------------------------------------------------------
Triang test
Timing performed on DEC-20
23-Mar-84 05:12:03 .
........................................
Cpu (- GC) Time = 86.948 secs
Elapsed Time = 96.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.1
Average Load Average = 1.15
------------------------------------------------------------
------------------------------------------------------------
TAK: Takai test, (TAK 18 12 6)
Timing performed on DEC-20
23-Mar-84 05:13:56 .
........................................
Cpu (- GC) Time = .485 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
TAK: Takai test, (tak 10018 10012 10006)
Timing performed on DEC-20
23-Mar-84 05:13:58 .
........................................
Cpu (- GC) Time = .467 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
TAKL: Takai test with lists
Timing performed on DEC-20
23-Mar-84 05:14:02 .
........................................
Cpu (- GC) Time = 2.527 secs
Elapsed Time = 2.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
TAKR: Takai test--Gross Version with Lots of functions
Timing performed on DEC-20
23-Mar-84 05:14:50 .
........................................
Cpu (- GC) Time = .599 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
STAK: Takai test using fluid binding
Timing performed on DEC-20
23-Mar-84 05:14:54 .
........................................
Cpu (- GC) Time = 2.694 secs
Elapsed Time = 2.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
CTAK: Takai test using catch and throw
Timing performed on DEC-20
23-Mar-84 05:15:00 .
........................................
Cpu (- GC) Time = 2.971 secs
Elapsed Time = 3.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
|||||||||||||||FPOLY benchmark, N = 2|||||||||||||||
------------------------------------------------------------
FPOLY test 1
Timing performed on DEC-20
23-Mar-84 05:15:36 .
........................................
Cpu (- GC) Time = .022 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 2
Timing performed on DEC-20
23-Mar-84 05:15:38 .
........................................
Cpu (- GC) Time = .039 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 3
Timing performed on DEC-20
23-Mar-84 05:15:40 .
........................................
Cpu (- GC) Time = .023 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
|||||||||||||||FPOLY benchmark, N = 5|||||||||||||||
------------------------------------------------------------
FPOLY test 1
Timing performed on DEC-20
23-Mar-84 05:15:42 .
........................................
Cpu (- GC) Time = .094 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 2
Timing performed on DEC-20
23-Mar-84 05:15:44 .
........................................
Cpu (- GC) Time = .378 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 3
Timing performed on DEC-20
23-Mar-84 05:15:46 .
........................................
Cpu (- GC) Time = .137 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
|||||||||||||||FPOLY benchmark, N = 10|||||||||||||||
------------------------------------------------------------
FPOLY test 1
Timing performed on DEC-20
23-Mar-84 05:15:48 .
........................................
Cpu (- GC) Time = .921 secs
Elapsed Time = 1.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 2
Timing performed on DEC-20
23-Mar-84 05:15:51 .
........................................
Cpu (- GC) Time = 6.46 secs
Elapsed Time = 6.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 3
Timing performed on DEC-20
23-Mar-84 05:15:59 .
........................................
Cpu (- GC) Time = 1.509 secs
Elapsed Time = 2.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
|||||||||||||||FPOLY benchmark, N = 15|||||||||||||||
------------------------------------------------------------
FPOLY test 1
Timing performed on DEC-20
23-Mar-84 05:16:03 .
........................................
Cpu (- GC) Time = 12.68 secs
Elapsed Time = 16.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.2
Load Average After   = 1.2
Average Load Average = 1.2
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 2
Timing performed on DEC-20
23-Mar-84 05:16:21 .
*** Garbage collection starting
*** GC 14: time 1481 ms, 239069 recovered, 239086 free
*** Garbage collection starting
*** GC 15: time 1623 ms, 234158 recovered, 234173 free
*** Garbage collection starting
*** GC 16: time 1607 ms, 231165 recovered, 231166 free
........................................
Cpu (- GC) Time = 68.195 secs
Elapsed Time = 81.0 secs
GC Time = 4.711 secs
Load Average Before  = 1.2
Load Average After   = 1.1
Average Load Average = 1.15
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 3
Timing performed on DEC-20
23-Mar-84 05:17:44 .
........................................
Cpu (- GC) Time = 11.138 secs
Elapsed Time = 12.0 secs
GC Time = 0.0 secs
Load Average Before  = 1.1
Load Average After   = 1.1
Average Load Average = 1.1
------------------------------------------------------------
-------

∂30-Apr-84  1241	KESSLER@UTAH-20.ARPA 	Cray timings 
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 30 Apr 84  12:38:02 PDT
Date: Mon 30 Apr 84 13:38:43-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Cray timings
To: rpg@SU-AI.ARPA, Griss@UTAH-20.ARPA

Here are the numbers as of this morning.  Note, about 3 tests don't
work due to missing function wgetv, which Wayne is trying to correct,
defstruct isn't on the cray yet, so another one doesn't work.  Finally,
the 2nd fpoly test is incorrect, since it requires bignums, which also
isn't on there yet.  I'll forward the wgetv numbers if and when I get 
them.  At least this will give a flavor of the speed.  Notice I edited out
the function definitions, so you can extract what you need (I hope).

Bob.
-----------------
30-Apr-84 11:06:56-MDT,108901;000000000001
Mail-From: NOT-LOGGED-IN created at 30-Apr-84 11:04:06
Return-Path: <jwa@lanl>
Received: from lanl by UTAH-20.ARPA with TCP; Mon 30 Apr 84 11:04:08-MDT
Date: 30 Apr 1984 10:39:44-MDT
From: Wayne Anderson C-10 <jwa@lanl>
Reply-to: jwa@lanl
To: kessler@utah-20
Subject: here comes d' timings


 08:16:00 000:10.923 *timepsl / 5 1.1  
 08:16:05 000:11.467  Timed Psl, No-Date-Yet   
 08:16:10 000:11.987 1 lisp> (off usermode pgwd plap pcode)    
 08:16:13 000:13.030   
 08:16:15 000:13.549 NIL$2 π
 08:16:17 000:14.068 2 lisp> (dskin "timeboye")
 08:17:57 000:55.326 ------------------------------------------------------------$2 ε
 08:17:58 000:55.847 Boyer Test
 08:18:01 000:56.367 Timing performed on CRAY  
 08:18:04 000:57.500 23-apr-84 12:00:00.$2 π
 08:18:06 000:58.020 *** Garbage collection starting   
 08:18:09 000:59.491 *** GC 22: time 800043 ms 
 08:18:10 001:00.012 *** 85496 recovered, 23059 stable, 41444 active, 1 free   
 08:18:13 001:00.943 *** Garbage collection starting   
 08:18:16 001:02.433 *** GC 23: time 815577 ms 
 08:18:18 001:02.954 *** 69972 recovered, 41359 stable, 38668 active, 1 free   
 08:18:19 001:03.808 *** Garbage collection starting   
 08:18:22 001:05.295 *** GC 24: time 814070 ms 
 08:18:24 001:05.817 *** 44664 recovered, 80303 stable, 25032 active, 1 free   
 08:18:26 001:06.550 *** Garbage collection starting   
 08:18:29 001:08.083 *** GC 25: time 853295 ms 
 08:18:31 001:08.605 *** 34792 recovered, 80843 stable, 34364 active, 1 free   
 08:18:32 001:09.292 *** Garbage collection starting   
 08:18:35 001:10.996 *** GC 26: time 996760 ms 
 08:18:37 001:11.518 *** 31768 recovered, 34009 stable, 84222 active, 1 free   
 08:18:39 001:12.191 *** Garbage collection starting   
 08:18:42 001:13.693 *** GC 27: time 826350 ms 
 08:18:44 001:14.215 *** 23884 recovered, 108113 stable, 18002 active, 1 free  
 08:18:46 001:14.850 *** Garbage collection starting   
 08:18:49 001:16.373 *** GC 28: time 844073 ms 
 08:18:50 001:16.895 *** 21312 recovered, 105603 stable, 23084 active, 1 free  
 08:18:52 001:17.518 *** Garbage collection starting   
 08:18:55 001:19.008 *** GC 29: time 816474 ms 
 08:18:56 001:19.530 *** 12424 recovered, 128947 stable, 8628 active, 1 free   
 08:18:58 001:20.111 *** Garbage collection starting   
 08:19:01 001:21.612 *** GC 30: time 826154 ms 
 08:19:03 001:22.134 *** 10028 recovered, 129453 stable, 10518 active, 1 free  
 08:19:05 001:22.668   
 08:19:07 001:23.188 ........................................  
 08:19:08 001:23.710 Cpu (- GC) Time = 1852.68500000 secs$2 ε
 08:19:10 001:24.230 Elapsed Time = 0. secs    
 08:19:11 001:24.751 GC Time = 7592.79600000 secs$2 ε
 08:19:13 001:25.272 Load Average Before  = 0  
 08:19:15 001:25.792 Load Average After   = 0  
 08:19:16 001:26.312 Average Load Average = 0. 
 08:19:19 001:26.834 ------------------------------------------------------------$2 ε
 08:19:21 001:27.355 NIL$2 π
 08:19:24 001:27.934 NIL$2 π
 08:19:25 001:28.454 3 lisp> (exitlisp)
 08:19:28 001:29.253 *timepsl / 5 1.1  
 08:19:30 001:29.779  Timed Psl, No-Date-Yet   
 08:19:32 001:30.299 1 lisp> (off usermode pgwd plap pcode)    
 08:19:35 001:31.341   
 08:19:37 001:31.861 NIL$2 π
 08:19:39 001:32.380 2 lisp> (dskin "timebrow")
 08:20:48 001:56.360 ------------------------------------------------------------$2 ε
 08:20:50 001:56.881 browse    
 08:20:52 001:57.400 Timing performed on CRAY  
 08:20:55 001:58.683 23-apr-84 12:00:00.$2 π
 08:20:56 001:59.203 *** Garbage collection starting   
 08:21:00 002:00.540 *** GC 21: time 687468 ms 
 08:21:03 002:01.062 *** 119828 recovered, 22779 stable, 7392 active, 1 free   
 08:21:06 002:02.254 *** Garbage collection starting   
 08:21:11 002:03.562 *** GC 22: time 662972 ms 
 08:21:14 002:04.083 *** 119850 recovered, 30131 stable, 18 active, 1 free$2 ¬
 08:21:18 002:05.273 *** Garbage collection starting   
 08:21:21 002:06.580 *** GC 23: time 662295 ms 
 08:21:23 002:07.102 *** 119856 recovered, 30107 stable, 36 active, 1 free$2 ¬
 08:21:26 002:08.296 *** Garbage collection starting   
 08:21:30 002:09.604 *** GC 24: time 662969 ms 
 08:21:31 002:10.126 *** 119870 recovered, 30081 stable, 48 active, 1 free$2 ¬
 08:21:34 002:11.315 *** Garbage collection starting   
 08:21:37 002:12.624 *** GC 25: time 663993 ms 
 08:21:39 002:13.145 *** 119918 recovered, 30057 stable, 24 active, 1 free$2 ¬
 08:21:42 002:14.336 *** Garbage collection starting   
 08:21:45 002:15.645 *** GC 26: time 663804 ms 
 08:21:47 002:16.166 *** 119936 recovered, 30033 stable, 30 active, 1 free$2 ¬
 08:21:50 002:17.360 *** Garbage collection starting   
 08:21:53 002:18.667 *** GC 27: time 662206 ms 
 08:21:55 002:19.188 *** 119962 recovered, 30007 stable, 30 active, 1 free$2 ¬
 08:21:58 002:20.381 *** Garbage collection starting   
 08:22:02 002:21.687 *** GC 28: time 661157 ms 
 08:22:04 002:22.209 *** 119996 recovered, 29983 stable, 20 active, 1 free$2 ¬
 08:22:06 002:22.803   
 08:22:08 002:23.323 ........................................  
 08:22:11 002:23.844 Cpu (- GC) Time = 4676.80400000 secs$2 ε
 08:22:13 002:24.365 Elapsed Time = 0. secs    
 08:22:17 002:24.885 GC Time = 5326.86400000 secs$2 ε
 08:22:19 002:25.406 Load Average Before  = 0  
 08:22:21 002:25.926 Load Average After   = 0  
 08:22:24 002:26.447 Average Load Average = 0. 
 08:22:28 002:26.968 ------------------------------------------------------------$2 ε
 08:22:30 002:27.489 NIL$2 π
 08:22:32 002:28.039 NIL$2 π
 08:22:33 002:28.559 3 lisp> (exitlisp)
 08:22:36 002:29.358 *timepsl / 5 1.1  
 08:22:38 002:29.884  Timed Psl, No-Date-Yet   
 08:22:40 002:30.404 1 lisp> (off usermode pgwd plap pcode)    
 08:22:43 002:31.446   
 08:22:45 002:31.965 NIL$2 π
 08:22:47 002:32.485 2 lisp> (dskin "timedder")
 08:23:53 002:51.588 ------------------------------------------------------------$2 ε
 08:23:55 002:52.110 DDeriv Test, also same as FDDeriv 
 08:23:57 002:52.631 Timing performed on CRAY  
 08:23:59 002:53.561 23-apr-84 12:00:00.$2 π
 08:24:01 002:54.082 *** Garbage collection starting   
 08:24:04 002:55.347 *** GC 21: time 626749 ms 
 08:24:06 002:55.870 *** 127296 recovered, 22631 stable, 72 active, 1 free$2 ¬
 08:24:09 002:56.801 *** Garbage collection starting   
 08:24:13 002:58.066 *** GC 22: time 627053 ms 
 08:24:15 002:58.588 *** 127296 recovered, 22631 stable, 72 active, 1 free$2 ¬
 08:24:18 002:59.520 *** Garbage collection starting   
 08:24:21 003:00.785 *** GC 23: time 626535 ms 
 08:24:23 003:01.306 *** 127296 recovered, 22631 stable, 72 active, 1 free$2 ¬
 08:24:26 003:02.238 *** Garbage collection starting   
 08:24:29 003:03.503 *** GC 24: time 626476 ms 
 08:24:31 003:04.024 *** 127296 recovered, 22631 stable, 72 active, 1 free$2 ¬
 08:24:32 003:04.579   
 08:24:34 003:05.099 ........................................  
 08:24:36 003:05.621 Cpu (- GC) Time = 1422.06800000 secs$2 ε
 08:24:37 003:06.141 Elapsed Time = 0. secs    
 08:24:39 003:06.662 GC Time = 2506.81300000 secs$2 ε
 08:24:41 003:07.182 Load Average Before  = 0  
 08:24:43 003:07.703 Load Average After   = 0  
 08:24:45 003:08.223 Average Load Average = 0. 
 08:24:47 003:08.744 ------------------------------------------------------------$2 ε
 08:24:50 003:09.265 NIL$2 π
 08:24:52 003:09.830 NIL$2 π
 08:24:55 003:10.350 3 lisp> (exitlisp)
 08:24:58 003:11.149 *timepsl / 5 1.1  
 08:25:00 003:11.675  Timed Psl, No-Date-Yet   
 08:25:01 003:12.195 1 lisp> (off usermode pgwd plap pcode)    
 08:25:05 003:13.238   
 08:25:07 003:13.757 NIL$2 π
 08:25:08 003:14.276 2 lisp> (dskin "timederi")
 08:26:08 003:30.406 ------------------------------------------------------------$2 ε
 08:26:10 003:30.927 Deriv Test.$2 π
 08:26:12 003:31.447 Timing performed on CRAY  
 08:26:14 003:32.360 23-apr-84 12:00:00.$2 π
 08:26:16 003:32.880 *** Garbage collection starting   
 08:26:19 003:34.145 *** GC 21: time 626479 ms 
 08:26:20 003:34.666 *** 127302 recovered, 22615 stable, 82 active, 1 free$2 ¬
 08:26:23 003:35.582 *** Garbage collection starting   
 08:26:26 003:36.846 *** GC 22: time 625946 ms 
 08:26:28 003:37.367 *** 127302 recovered, 22615 stable, 82 active, 1 free$2 ¬
 08:26:31 003:38.279 *** Garbage collection starting   
 08:26:34 003:39.543 *** GC 23: time 625489 ms 
 08:26:36 003:40.064 *** 127302 recovered, 22615 stable, 82 active, 1 free$2 ¬
 08:26:38 003:40.918   
 08:26:40 003:41.438 ........................................  
 08:26:42 003:41.959 Cpu (- GC) Time = 1279.98200000 secs$2 ε
 08:26:44 003:42.480 Elapsed Time = 0. secs    
 08:26:45 003:43.000 GC Time = 1877.91400000 secs$2 ε
 08:26:47 003:43.521 Load Average Before  = 0  
 08:26:49 003:44.041 Load Average After   = 0  
 08:26:50 003:44.561 Average Load Average = 0. 
 08:26:52 003:45.083 ------------------------------------------------------------$2 ε
 08:26:54 003:45.604 NIL$2 π
 08:26:56 003:46.187 NIL$2 π
 08:26:58 003:46.707 3 lisp> (exitlisp)
 08:27:00 003:47.506 *timepsl / 5 1.1  
 08:27:02 003:48.032  Timed Psl, No-Date-Yet   
 08:27:04 003:48.552 1 lisp> (off usermode pgwd plap pcode)    
 08:27:08 003:49.594   
 08:27:09 003:50.113 NIL$2 π
 08:27:11 003:50.633 2 lisp> (dskin "timedest")
 08:27:53 004:04.818 ------------------------------------------------------------$2 ε
 08:27:55 004:05.339 `Destructive' Test
 08:27:57 004:05.860 Timing performed on CRAY  
 08:28:00 004:06.916 23-apr-84 12:00:00.$2 π
 08:28:01 004:07.437 ........................................  
 08:28:05 004:07.958 Cpu (- GC) Time = 451.72600000 secs$2 π
 08:28:07 004:08.479 Elapsed Time = 0. secs    
 08:28:09 004:08.999 GC Time = 0. secs 
 08:28:10 004:09.519 Load Average Before  = 0  
 08:28:12 004:10.039 Load Average After   = 0  
 08:28:15 004:10.560 Average Load Average = 0. 
 08:28:17 004:11.081 ------------------------------------------------------------$2 ε
 08:28:21 004:11.602 NIL$2 π
 08:28:22 004:12.144 NIL$2 π
 08:28:26 004:12.664 3 lisp> (exitlisp)
 08:28:30 004:13.463 *timepsl / 5 1.1  
 08:28:32 004:13.989  Timed Psl, No-Date-Yet   
 08:28:33 004:14.509 1 lisp> (off usermode pgwd plap pcode)    
 08:28:37 004:15.551   
 08:28:38 004:16.070 NIL$2 π
 08:28:40 004:16.590 2 lisp> (dskin "time-div")
 08:29:53 004:34.751 ------------------------------------------------------------$2 ε
 08:29:54 004:35.273 Div test 1, iterative version of div$2 ε
 08:29:56 004:35.793 Timing performed on CRAY  
 08:29:58 004:36.675 23-apr-84 12:00:00.$2 π
 08:30:00 004:37.195 *** Garbage collection starting   
 08:30:03 004:38.476 *** GC 21: time 639911 ms 
 08:30:06 004:38.997 *** 126800 recovered, 23019 stable, 180 active, 1 free    
 08:30:10 004:39.843   
 08:30:13 004:40.363 ........................................  
 08:30:17 004:40.884 Cpu (- GC) Time = 580.90000000 secs$2 π
 08:30:18 004:41.405 Elapsed Time = 0. secs    
 08:30:20 004:41.925 GC Time = 639.91100000 secs$2 π
 08:30:22 004:42.446 Load Average Before  = 0  
 08:30:24 004:42.966 Load Average After   = 0  
 08:30:30 004:43.487 Average Load Average = 0. 
 08:30:32 004:44.008 ------------------------------------------------------------$2 ε
 08:30:37 004:44.529 NIL$2 π
 08:30:42 004:45.080 *** Garbage collection starting   
 08:30:46 004:46.299 *** GC 22: time 588419 ms 
 08:30:48 004:46.821 *** 113235 recovered, 23007 stable, 12 active, 1 free$2 ¬
 08:30:50 004:47.341   
 08:30:51 004:47.861 ------------------------------------------------------------$2 ε
 08:30:55 004:48.384 Div test 2, recursive version of div$2 ε
 08:30:57 004:48.905 Timing performed on CRAY  
 08:31:00 004:49.785 23-apr-84 12:00:00.$2 π
 08:31:03 004:50.305 *** Garbage collection starting   
 08:31:07 004:51.586 *** GC 23: time 640191 ms 
 08:31:09 004:52.108 *** 126800 recovered, 23019 stable, 180 active, 1 free    
 08:31:11 004:52.948   
 08:31:15 004:53.468 ........................................  
 08:31:18 004:53.989 Cpu (- GC) Time = 575.17000000 secs$2 π
 08:31:25 004:54.510 Elapsed Time = 0. secs    
 08:31:33 004:55.031 GC Time = 640.19100000 secs$2 π
 08:31:43 004:55.551 Load Average Before  = 0  
 08:31:44 004:56.072 Load Average After   = 0  
 08:31:46 004:56.592 Average Load Average = 0. 
 08:31:48 004:57.113 ------------------------------------------------------------$2 ε
 08:31:51 004:57.634 NIL$2 π
 08:31:54 004:58.190 NIL$2 π
 08:31:56 004:58.710 3 lisp> (exitlisp)
 08:32:00 004:59.509 *timepsl / 5 1.1  
 08:32:01 005:00.035  Timed Psl, No-Date-Yet   
 08:32:03 005:00.555 1 lisp> (off usermode pgwd plap pcode)    
 08:32:07 005:01.597   
 08:32:08 005:02.116 NIL$2 π
 08:32:14 005:02.636 2 lisp> (dskin "time-fft")
 08:33:33 005:19.424 ------------------------------------------------------------$2 ε
 08:33:36 005:19.945 FFT Test  
 08:33:41 005:20.465 Timing performed on CRAY  
 08:33:47 005:20.986 23-apr-84 12:00:00.Undefined Function WGETV called with 2 
 08:33:48 005:21.507  args from compiled code  
 08:33:50 005:21.787 (exitlisp)
 08:33:50 005:21.787  ----- 020  warning - previous line not used$2 ε
 08:33:50 005:21.789 *timepsl / 5 1.1  
 08:33:54 005:22.315  Timed Psl, No-Date-Yet   
 08:33:58 005:22.835 1 lisp> (off usermode pgwd plap pcode)    
 08:34:04 005:23.877   
 08:34:06 005:24.396 NIL$2 π
 08:34:08 005:24.916 2 lisp> (dskin "timefpri")
 08:50:20 008:15.931 ------------------------------------------------------------$2 ε
 08:50:22 008:16.452 Fprint Test$2 π
 08:50:25 008:16.972 Timing performed on CRAY  
 08:50:39 008:21.866 23-apr-84 12:00:00.$2 π
 08:50:41 008:22.386 ........................................  
 08:50:43 008:22.907 Cpu (- GC) Time = 3617.90000000 secs$2 ε
 08:50:44 008:23.428 Elapsed Time = 0. secs    
 08:50:48 008:23.948 GC Time = 0. secs 
 08:50:51 008:24.468 Load Average Before  = 0  
 08:50:53 008:24.988 Load Average After   = 0  
 08:50:55 008:25.509 Average Load Average = 0. 
 08:50:58 008:26.030 ------------------------------------------------------------$2 ε
 08:51:02 008:26.551 NIL$2 π
 08:51:06 008:27.123 NIL$2 π
 08:51:13 008:27.643 3 lisp> (exitlisp)
 08:51:19 008:28.442 *timepsl / 5 1.1  
 08:51:21 008:28.968  Timed Psl, No-Date-Yet   
 08:51:23 008:29.489 1 lisp> (off usermode pgwd plap pcode)    
 08:51:26 008:30.531   
 08:51:32 008:31.051 NIL$2 π
 08:51:35 008:31.571 2 lisp> (dskin "timefrea")
 08:52:55 008:41.370 ------------------------------------------------------------$2 ε
 08:52:56 008:41.891 fread test
 08:52:58 008:42.411 Timing performed on CRAY  
 08:53:59 009:07.241 23-apr-84 12:00:00.$2 π
 08:54:02 009:07.761 ........................................  
 08:54:04 009:08.283 Cpu (- GC) Time = 2.04078770e+04 secs$2 ¬
 08:54:06 009:08.803 Elapsed Time = 0. secs    
 08:54:08 009:09.324 GC Time = 0. secs 
 08:54:10 009:09.844 Load Average Before  = 0  
 08:54:12 009:10.364 Load Average After   = 0  
 08:54:15 009:10.885 Average Load Average = 0. 
 08:54:17 009:11.406 ------------------------------------------------------------$2 ε
 08:54:19 009:11.927 NIL$2 π
 08:54:21 009:12.477 NIL$2 π
 08:54:23 009:12.997 3 lisp> (exitlisp)
 08:54:26 009:13.796 *timepsl / 5 1.1  
 08:54:27 009:14.322  Timed Psl, No-Date-Yet   
 08:54:29 009:14.842 1 lisp> (off usermode pgwd plap pcode)    
 08:54:35 009:15.884   
 08:54:39 009:16.403 NIL$2 π
 08:54:43 009:16.923 2 lisp> (dskin "timepuzz")
 08:56:30 009:45.486 ------------------------------------------------------------$2 ε
 08:56:32 009:46.007 Puzzle test$2 π
 08:56:34 009:46.527 Timing performed on CRAY  
 08:56:38 009:47.049 23-apr-84 12:00:00.Undefined Function WPUTV called with 3 
 08:56:40 009:47.570  args from compiled code  
 08:56:41 009:47.850 (exitlisp)
 08:56:41 009:47.850  ----- 020  warning - previous line not used$2 ε
 08:56:41 009:47.851 *timepsl / 5 1.1  
 08:56:42 009:48.377  Timed Psl, No-Date-Yet   
 08:56:44 009:48.897 1 lisp> (off usermode pgwd plap pcode)    
 08:56:48 009:49.939   
 08:56:49 009:50.458 NIL$2 π
 08:56:51 009:50.978 2 lisp> (dskin "timetpri")
 09:04:02 011:21.055 ------------------------------------------------------------$2 ε
 09:04:04 011:21.576 tprint test$2 π
 09:04:06 011:22.096 Timing performed on CRAY  
 09:04:09 011:22.617 23-apr-84 12:00:00.((((((!678E !567D !567D !456D !456D !345C) !234B (!567D$2 λ
 09:04:11 011:23.140 !456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A)$2 λ
 09:10:53 012:34.798   
 09:10:55 012:35.318 ........................................  
 09:11:03 012:35.839 Cpu (- GC) Time = 189.95300000 secs$2 π
 09:11:05 012:36.359 Elapsed Time = 0. secs    
 09:11:07 012:36.880 GC Time = 0. secs 
 09:11:09 012:37.400 Load Average Before  = 0  
 09:11:11 012:37.920 Load Average After   = 0  
 09:11:13 012:38.440 Average Load Average = 0. 
 09:11:15 012:38.961 ------------------------------------------------------------$2 ε
 09:11:17 012:39.482 NIL$2 π
 09:11:20 012:40.071 NIL$2 π
 09:11:23 012:40.591 3 lisp> (exitlisp)
 09:11:26 012:41.390 *timepsl / 5 1.1  
 09:11:35 012:41.916  Timed Psl, No-Date-Yet   
 09:11:39 012:42.436 1 lisp> (off usermode pgwd plap pcode)    
 09:11:48 012:43.478   
 09:11:52 012:43.998 NIL$2 π
 09:11:55 012:44.517 2 lisp> (dskin "timetrav")
 09:12:04 012:46.604 Should be loading (TIMER USEFUL)  
 09:12:08 012:47.125 NIL$2 π
 09:12:15 012:47.667 NIL$2 π
 09:12:20 012:48.211 NIL$2 π
 09:12:21 012:48.742 Should be loading (NUMERIC-OPERATORS)$2 ¬
 09:12:23 012:49.262 NIL$2 π
 09:12:25 012:49.791 NIL$2 π
 09:12:27 012:50.358 Should be loading (NSTRUCT)$2 π
 09:12:30 012:50.878 NIL$2 π
 09:12:40 012:51.423 NIL$2 π
 09:12:43 012:52.021 ***** `DEFSTRUCT' is an undefined function
 09:12:45 012:52.544 ***** Continuable error, retry form is:   
 09:12:47 012:53.066 (DEFSTRUCT (NODE FAST-VECTOR) (PARENTS NIL) (SONS NIL) (SN (SNB)) (ENTRY1 
 09:12:48 012:53.590 NIL) (ENTRY2 NIL) (ENTRY3 NIL) (ENTRY4 NIL) (ENTRY5 NIL) (ENTRY6 NIL) (MARK$2 π
 09:12:59 012:54.111 NIL))$2 ¬
 09:13:03 012:54.630 Break loop
 09:13:10 012:55.151 3 lisp break>> (exitlisp) 
 09:13:16 012:55.950 *timepsl / 5 1.1  
 09:13:18 012:56.476  Timed Psl, No-Date-Yet   
 09:13:19 012:56.996 1 lisp> (off usermode pgwd plap pcode)    
 09:13:23 012:58.039   
 09:13:24 012:58.558 NIL$2 π
 09:13:26 012:59.077 2 lisp> (dskin "timetria")
 09:15:10 013:21.873 ------------------------------------------------------------$2 ε
 09:15:12 013:22.394 Triang test$2 π
 09:15:14 013:22.914 Timing performed on CRAY  
 09:15:15 013:23.436 23-apr-84 12:00:00.Undefined Function WGETV called with 2 
 09:15:17 013:23.957  args from compiled code  
 09:15:18 013:24.237 (exitlisp)
 09:15:18 013:24.237  ----- 020  warning - previous line not used$2 ε
 09:15:18 013:24.238 *timepsl / 5 1.1  
 09:15:20 013:24.764  Timed Psl, No-Date-Yet   
 09:15:23 013:25.294 1 lisp> (off usermode pgwd plap pcode)    
 09:15:27 013:26.336   
 09:15:29 013:26.855 NIL$2 π
 09:15:31 013:27.375 2 lisp> (dskin "time-tak")
 09:16:42 013:42.221 ------------------------------------------------------------$2 ε
 09:16:47 013:42.743 TAK: Takai test, (TAK 18 12 6)    
 09:16:49 013:43.264 Timing performed on CRAY  
 09:16:51 013:43.837 23-apr-84 12:00:00.$2 π
 09:16:53 013:44.358 ........................................  
 09:16:55 013:44.879 Cpu (- GC) Time = 44.68900000 secs
 09:16:57 013:45.399 Elapsed Time = 0. secs    
 09:16:59 013:45.920 GC Time = 0. secs 
 09:17:01 013:46.440 Load Average Before  = 0  
 09:17:03 013:46.960 Load Average After   = 0  
 09:17:08 013:47.480 Average Load Average = 0. 
 09:17:10 013:48.002 ------------------------------------------------------------$2 ε
 09:17:12 013:48.523 NIL$2 π
 09:17:14 013:49.066 *** Garbage collection starting   
 09:17:16 013:49.785 *** GC 21: time 166959 ms 
 09:17:18 013:50.307 *** 35 recovered, 22603 stable, 12 active, 1 free 
 09:17:20 013:50.827   
 09:17:22 013:51.348 ------------------------------------------------------------$2 ε
 09:17:23 013:51.870 TAK: Takai test, (tak 10018 10012 10006)  
 09:17:25 013:52.390 Timing performed on CRAY  
 09:17:29 013:52.964 23-apr-84 12:00:00.$2 π
 09:17:30 013:53.485 ........................................  
 09:17:32 013:54.006 Cpu (- GC) Time = 45.01200000 secs
 09:17:35 013:54.527 Elapsed Time = 0. secs    
 09:17:37 013:55.047 GC Time = 0. secs 
 09:17:39 013:55.567 Load Average Before  = 0  
 09:17:41 013:56.087 Load Average After   = 0  
 09:17:43 013:56.608 Average Load Average = 0. 
 09:17:45 013:57.129 ------------------------------------------------------------$2 ε
 09:19:03 014:07.748 ------------------------------------------------------------$2 ε
 09:19:05 014:08.269 TAKL: Takai test with lists$2 π
 09:19:06 014:08.790 Timing performed on CRAY  
 09:19:09 014:09.674 23-apr-84 12:00:00.$2 π
 09:19:11 014:10.195 ........................................  
 09:19:13 014:10.716 Cpu (- GC) Time = 306.72600000 secs$2 π
 09:19:14 014:11.237 Elapsed Time = 0. secs    
 09:19:16 014:11.757 GC Time = 0. secs 
 09:19:18 014:12.277 Load Average Before  = 0  
 09:19:20 014:12.797 Load Average After   = 0  
 09:19:22 014:13.318 Average Load Average = 0. 
 09:19:24 014:13.839 ------------------------------------------------------------$2 ε
 09:26:32 015:46.151 ------------------------------------------------------------$2 ε
 09:26:34 015:46.673 TAKR: Takai test--Gross Version with Lots of functions    
 09:26:36 015:47.194 Timing performed on CRAY  
 09:26:38 015:47.796 23-apr-84 12:00:00.$2 π
 09:26:40 015:48.317 ........................................  
 09:26:42 015:48.838 Cpu (- GC) Time = 69.02000000 secs
 09:26:44 015:49.359 Elapsed Time = 0. secs    
 09:26:46 015:49.879 GC Time = 0. secs 
 09:26:48 015:50.399 Load Average Before  = 0  
 09:26:49 015:50.919 Load Average After   = 0  
 09:26:51 015:51.440 Average Load Average = 0. 
 09:26:53 015:51.961 ------------------------------------------------------------$2 ε
 09:27:31 016:00.979 ------------------------------------------------------------$2 ε
 09:27:33 016:01.501 STAK: Takai test using fluid binding$2 ε
 09:27:34 016:02.022 Timing performed on CRAY  
 09:27:41 016:03.871 23-apr-84 12:00:00.$2 π
 09:27:43 016:04.391 ........................................  
 09:27:53 016:04.913 Cpu (- GC) Time = 1119.51800000 secs$2 ε
 09:27:55 016:05.433 Elapsed Time = 0. secs    
 09:27:56 016:05.953 GC Time = 0. secs 
 09:27:58 016:06.473 Load Average Before  = 0  
 09:28:00 016:06.994 Load Average After   = 0  
 09:28:02 016:07.514 Average Load Average = 0. 
 09:28:03 016:08.035 ------------------------------------------------------------$2 ε
 09:28:48 016:16.033   
 09:28:59 016:16.554 ------------------------------------------------------------$2 ε
 09:29:01 016:17.075 CTAK: Takai test using catch and throw    
 09:29:11 016:17.596 Timing performed on CRAY  
 09:29:23 016:18.836 23-apr-84 12:00:00.$2 π
 09:29:25 016:19.357 ........................................  
 09:29:26 016:19.878 Cpu (- GC) Time = 606.33700000 secs$2 π
 09:29:28 016:20.398 Elapsed Time = 0. secs    
 09:29:30 016:20.918 GC Time = 0. secs 
 09:29:31 016:21.439 Load Average Before  = 0  
 09:29:33 016:21.959 Load Average After   = 0  
 09:29:35 016:22.479 Average Load Average = 0. 
 09:29:37 016:23.001 ------------------------------------------------------------$2 ε
 09:29:39 016:23.521 NIL$2 π
 09:29:40 016:24.064 NIL$2 π
 09:29:42 016:24.584 3 lisp> (exitlisp)
 09:29:44 016:25.383 *timepsl / 5 1.1  
 09:29:46 016:25.909  Timed Psl, No-Date-Yet   
 09:29:48 016:26.429 1 lisp> (off usermode pgwd plap pcode)    
 09:29:52 016:27.471   
 09:29:54 016:27.990 NIL$2 π
 09:29:55 016:28.510 2 lisp> (dskin "timefrpo")
 09:32:49 017:12.800 ------------------------------------------------------------$2 ε
 09:33:02 017:13.321 FPOLY test 1$2 ε
 09:33:05 017:13.841 Timing performed on CRAY  
 09:33:07 017:14.363 23-apr-84 12:00:00.$2 π
 09:33:08 017:14.884 ........................................  
 09:33:10 017:15.405 Cpu (- GC) Time = 1.31200000 secs 
 09:33:12 017:15.926 Elapsed Time = 0. secs    
 09:33:13 017:16.446 GC Time = 0. secs 
 09:33:23 017:16.966 Load Average Before  = 0  
 09:33:32 017:17.486 Load Average After   = 0  
 09:33:36 017:18.007 Average Load Average = 0. 
 09:33:43 017:18.528 ------------------------------------------------------------$2 ε
 09:33:45 017:19.050 *** Garbage collection starting   
 09:33:48 017:19.772 *** GC 23: time 169330 ms 
 09:33:49 017:20.293 *** 155 recovered, 22845 stable, 70 active, 1 free
 09:33:51 017:20.813   
 09:33:53 017:21.334 ------------------------------------------------------------$2 ε
 09:33:54 017:21.855 FPOLY test 2$2 ε
 09:33:56 017:22.375 Timing performed on CRAY  
 09:33:58 017:22.897 23-apr-84 12:00:00.$2 π
 09:34:00 017:23.418 ........................................  
 09:34:01 017:23.939 Cpu (- GC) Time = 1.31000000 secs 
 09:34:03 017:24.459 Elapsed Time = 0. secs    
 09:34:05 017:24.980 GC Time = 0. secs 
 09:34:07 017:25.500 Load Average Before  = 0  
 09:34:09 017:26.020 Load Average After   = 0  
 09:34:10 017:26.541 Average Load Average = 0. 
 09:34:12 017:27.062 ------------------------------------------------------------$2 ε
 09:34:14 017:27.583 *** Garbage collection starting   
 09:34:16 017:28.306 *** GC 24: time 169679 ms 
 09:34:18 017:28.827 *** 225 recovered, 22845 stable, 70 active, 1 free
 09:34:20 017:29.348   
 09:34:21 017:29.868 ------------------------------------------------------------$2 ε
 09:34:23 017:30.389 FPOLY test 3$2 ε
 09:34:25 017:30.909 Timing performed on CRAY  
 09:34:29 017:31.432 23-apr-84 12:00:00.$2 π
 09:34:33 017:31.953 ........................................  
 09:34:35 017:32.474 Cpu (- GC) Time = 1.93400000 secs 
 09:34:42 017:32.994 Elapsed Time = 0. secs    
 09:34:46 017:33.515 GC Time = 0. secs 
 09:34:50 017:34.035 Load Average Before  = 0  
 09:34:55 017:34.555 Load Average After   = 0  
 09:34:57 017:35.077 Average Load Average = 0. 
 09:34:59 017:35.598 ------------------------------------------------------------$2 ε
 09:35:00 017:36.119 NIL$2 π
 09:35:02 017:36.691 |||||||||||||||FPOLY benchmark, N = 5|||||||||||||||$2 ε
 09:35:04 017:37.212 *** Garbage collection starting   
 09:35:06 017:37.934 *** GC 25: time 169627 ms 
 09:35:09 017:38.456 *** 407 recovered, 22831 stable, 14 active, 1 free
 09:35:12 017:38.976   
 09:35:15 017:39.497 ------------------------------------------------------------$2 ε
 09:35:16 017:40.018 FPOLY test 1$2 ε
 09:35:30 017:40.538 Timing performed on CRAY  
 09:35:32 017:41.074 23-apr-84 12:00:00.$2 π
 09:35:34 017:41.595 ........................................  
 09:35:35 017:42.116 Cpu (- GC) Time = 13.34000000 secs
 09:35:37 017:42.636 Elapsed Time = 0. secs    
 09:35:39 017:43.157 GC Time = 0. secs 
 09:35:41 017:43.677 Load Average Before  = 0  
 09:35:42 017:44.197 Load Average After   = 0  
 09:35:44 017:44.718 Average Load Average = 0. 
 09:35:48 017:45.239 ------------------------------------------------------------$2 ε
 09:35:58 017:45.760 *** Garbage collection starting   
 09:36:03 017:46.491 *** GC 26: time 176353 ms 
 09:36:06 017:47.012 *** 1609 recovered, 22845 stable, 344 active, 1 free$2 ε
 09:36:08 017:47.533   
 09:36:09 017:48.053 ------------------------------------------------------------$2 ε
 09:36:11 017:48.574 FPOLY test 2$2 ε
 09:36:13 017:49.094 Timing performed on CRAY  
 09:36:18 017:49.626 23-apr-84 12:00:00.$2 π
 09:36:19 017:50.147 ........................................  
 09:36:21 017:50.668 Cpu (- GC) Time = 9.60700000 secs 
 09:36:23 017:51.188 Elapsed Time = 0. secs    
 09:36:25 017:51.709 GC Time = 0. secs 
 09:36:28 017:52.229 Load Average Before  = 0  
 09:36:30 017:52.749 Load Average After   = 0  
 09:36:38 017:53.270 Average Load Average = 0. 
 09:36:41 017:53.791 ------------------------------------------------------------$2 ε
 09:36:42 017:54.312 *** Garbage collection starting   
 09:36:45 017:55.038 *** GC 27: time 172736 ms 
 09:36:46 017:55.560 *** 1263 recovered, 22845 stable, 0 active, 1 free
 09:36:48 017:56.080   
 09:36:50 017:56.601 ------------------------------------------------------------$2 ε
 09:36:52 017:57.122 FPOLY test 3$2 ε
 09:36:54 017:57.642 Timing performed on CRAY  
 09:37:00 017:58.186 23-apr-84 12:00:00.$2 π
 09:37:02 017:58.707 ........................................  
 09:37:04 017:59.228 Cpu (- GC) Time = 20.18400000 secs
 09:37:05 017:59.748 Elapsed Time = 0. secs    
 09:37:22 018:00.269 GC Time = 0. secs 
 09:37:24 018:00.789 Load Average Before  = 0  
 09:37:26 018:01.309 Load Average After   = 0  
 09:37:29 018:01.830 Average Load Average = 0. 
 09:37:31 018:02.351 ------------------------------------------------------------$2 ε
 09:37:33 018:02.872 NIL$2 π
 09:37:34 018:03.471 |||||||||||||||FPOLY benchmark, N = 10|||||||||||||||$2 ¬
 09:37:36 018:03.992 *** Garbage collection starting   
 09:37:38 018:04.725 *** GC 28: time 178647 ms 
 09:37:40 018:05.247 *** 3202 recovered, 22831 stable, 14 active, 1 free$2 π
 09:37:42 018:05.768   
 09:37:50 018:06.288 ------------------------------------------------------------$2 ε
 09:37:52 018:06.809 FPOLY test 1$2 ε
 09:37:54 018:07.329 Timing performed on CRAY  
 09:37:56 018:08.019 23-apr-84 12:00:00.$2 π
 09:37:59 018:08.539 ........................................  
 09:38:01 018:09.060 Cpu (- GC) Time = 142.37800000 secs$2 π
 09:38:03 018:09.581 Elapsed Time = 0. secs    
 09:38:06 018:10.101 GC Time = 0. secs 
 09:38:08 018:10.621 Load Average Before  = 0  
 09:38:13 018:11.141 Load Average After   = 0  
 09:38:15 018:11.662 Average Load Average = 0. 
 09:38:17 018:12.183 ------------------------------------------------------------$2 ε
 09:38:18 018:12.704 *** Garbage collection starting   
 09:38:21 018:13.509 *** GC 29: time 239308 ms 
 09:38:25 018:14.031 *** 16959 recovered, 22845 stable, 1534 active, 1 free    
 09:38:28 018:14.551   
 09:38:36 018:15.072 ------------------------------------------------------------$2 ε
 09:38:41 018:15.594 FPOLY test 2$2 ε
 09:38:43 018:16.114 Timing performed on CRAY  
 09:38:45 018:16.673 23-apr-84 12:00:00.$2 π
 09:38:47 018:17.194 ........................................  
 09:38:50 018:17.715 Cpu (- GC) Time = 33.05000000 secs
 09:38:52 018:18.235 Elapsed Time = 0. secs    
 09:38:58 018:18.755 GC Time = 0. secs 
 09:38:59 018:19.275 Load Average Before  = 0  
 09:39:01 018:19.796 Load Average After   = 0  
 09:39:07 018:20.316 Average Load Average = 0. 
 09:39:08 018:20.837 ------------------------------------------------------------$2 ε
 09:39:10 018:21.358 *** Garbage collection starting   
 09:39:12 018:22.089 *** GC 30: time 177060 ms 
 09:39:14 018:22.611 *** 2525 recovered, 22845 stable, 0 active, 1 free
 09:39:16 018:23.131   
 09:39:17 018:23.652 ------------------------------------------------------------$2 ε
 09:39:20 018:24.173 FPOLY test 3$2 ε
 09:39:24 018:24.693 Timing performed on CRAY  
 09:39:26 018:25.485 23-apr-84 12:00:00.$2 π
 09:39:30 018:26.006 ........................................  
 09:39:32 018:26.527 Cpu (- GC) Time = 229.48300000 secs$2 π
 09:39:42 018:27.047 Elapsed Time = 0. secs    
 09:39:43 018:27.568 GC Time = 0. secs 
 09:39:54 018:28.088 Load Average Before  = 0  
 09:39:57 018:28.608 Load Average After   = 0  
 09:40:00 018:29.128 Average Load Average = 0. 
 09:40:05 018:29.649 ------------------------------------------------------------$2 ε
 09:40:07 018:30.170 NIL$2 π
 09:40:09 018:30.720 |||||||||||||||FPOLY benchmark, N = 15|||||||||||||||$2 ¬
 09:40:14 018:31.242 *** Garbage collection starting   
 09:40:19 018:32.091 *** GC 31: time 276640 ms 
 09:40:29 018:32.612 *** 34997 recovered, 22831 stable, 14 active, 1 free$2 ε
 09:40:31 018:33.133   
 09:40:33 018:33.653 ------------------------------------------------------------$2 ε
 09:40:35 018:34.174 FPOLY test 1$2 ε
 09:40:38 018:34.694 Timing performed on CRAY  
 09:40:42 018:36.332 23-apr-84 12:00:00.$2 π
 09:40:43 018:36.853 ........................................  
 09:40:45 018:37.374 Cpu (- GC) Time = 941.71600000 secs$2 π
 09:40:54 018:37.894 Elapsed Time = 0. secs    
 09:40:56 018:38.414 GC Time = 0. secs 
 09:40:58 018:38.934 Load Average Before  = 0  
 09:41:01 018:39.455 Load Average After   = 0  
 09:41:04 018:39.975 Average Load Average = 0. 
 09:41:05 018:40.496 ------------------------------------------------------------$2 ε
 09:41:07 018:41.018 *** Garbage collection starting   
 09:41:10 018:42.166 *** GC 32: time 528572 ms 
 09:41:14 018:42.688 *** 93731 recovered, 22845 stable, 4074 active, 1 free    
 09:41:16 018:43.208   
 09:41:17 018:43.728 ------------------------------------------------------------$2 ε
 09:41:19 018:44.249 FPOLY test 2$2 ε
 09:41:22 018:44.769 Timing performed on CRAY  
 09:41:24 018:45.349 23-apr-84 12:00:00.$2 π
 09:41:27 018:45.870 ........................................  
 09:41:30 018:46.391 Cpu (- GC) Time = 50.18300000 secs
 09:41:33 018:46.911 Elapsed Time = 0. secs    
 09:41:34 018:47.431 GC Time = 0. secs 
 09:41:37 018:47.951 Load Average Before  = 0  
 09:41:40 018:48.472 Load Average After   = 0  
 09:41:42 018:48.992 Average Load Average = 0. 
 09:41:44 018:49.513 ------------------------------------------------------------$2 ε
 09:41:45 018:50.035 *** Garbage collection starting   
 09:41:48 018:50.778 *** GC 33: time 187040 ms 
 09:41:50 018:51.299 *** 5313 recovered, 22845 stable, 0 active, 1 free
 09:41:53 018:51.819   
 09:41:55 018:52.340 ------------------------------------------------------------$2 ε
 09:42:01 018:52.861 FPOLY test 3$2 ε
 09:42:03 018:53.381 Timing performed on CRAY  
 09:42:06 018:54.941 23-apr-84 12:00:00.$2 π
 09:42:08 018:55.461 *** Garbage collection starting   
 09:42:11 018:56.671 *** GC 34: time 580258 ms 
 09:42:13 018:57.192 *** 119796 recovered, 22845 stable, 7359 active, 1 free   
 09:42:17 018:58.576   
 09:42:19 018:59.096 ........................................  
 09:42:21 018:59.617 Cpu (- GC) Time = 1605.61400000 secs$2 ε
 09:42:22 019:00.139 Elapsed Time = 0. secs    
 09:42:24 019:00.659 GC Time = 580.25800000 secs$2 π
 09:42:26 019:01.179 Load Average Before  = 0  
 09:42:28 019:01.700 Load Average After   = 0  
 09:42:30 019:02.220 Average Load Average = 0. 
 09:42:32 019:02.741 ------------------------------------------------------------$2 ε
 09:42:33 019:03.262 NIL$2 π
 09:42:35 019:03.888 NIL$2 π
 09:42:37 019:04.408 3 lisp> (exitlisp)
 09:54:12 019:05.265 *log$2 ε

-------

∂30-Apr-84  1344	KESSLER@UTAH-20.ARPA 	[jwa@lanl: timings]    
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 30 Apr 84  13:44:47 PDT
Date: Mon 30 Apr 84 14:45:24-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: [jwa@lanl: timings]
To: rpg@SU-AI.ARPA, griss@UTAH-20.ARPA

More timings.  Looks like we got puzzle and triang.  FFT still has a problem
(Oh well)....

Bob.
                ---------------

Mail-From: NOT-LOGGED-IN created at 30-Apr-84 14:39:47
Return-Path: <jwa@lanl>
Received: from lanl by UTAH-20.ARPA with TCP; Mon 30 Apr 84 14:39:48-MDT
Date: 30 Apr 1984 14:37:01-MDT
From: jwa@lanl
Received: by LANL.ARPA (4.12/4.7)
	id AA02222; Mon, 30 Apr 84 14:36:14 mdt
Date: Mon, 30 Apr 84 14:36:14 mdt
From: jwa@lanl (Wayne Anderson)
Message-Id: <8404302036.AA02222@LANL.ARPA>
To: kessler@utah-20
Subject: timings

Bob,

Had a little problem with fft.  Here it comes.......

 14:20:46 000:20.294 *timepsl / 5 1.1  
 14:20:48 000:20.838  Timed Psl, No-Date-Yet   
 14:20:51 000:21.359 1 lisp> (off usermode pgwd plap pcode)    
 14:21:03 000:22.401   
 14:21:10 000:22.920 NIL       
 14:21:18 000:23.440 2 lisp> (dskin "time-fft")
 14:21:23 000:25.004 Should be loading (USEFUL COMMON NUMERIC-OPERATORS FAST-VECTORS TIMER)    
 14:21:25 000:25.525 NIL       
 14:21:27 000:26.071 Should be loading (MATHLIB)       
 14:21:29 000:26.591 NIL       
 14:21:31 000:27.141 NIL       
 14:21:33 000:27.692 NIL       
 14:21:35 000:28.226 NIL       
 14:21:37 000:28.777 NIL       
 14:21:40 000:29.555 **
 14:21:44 000:30.082 NIL       
 14:21:48 000:32.051 *** Garbage collection starting   
 14:21:58 000:33.342 *** GC 19: time 648831 ms 
 14:22:00 000:33.864 *** 123183 recovered, 22234 stable, 4583 active, 1 free   
 14:22:03 000:35.119 FFT       
 14:22:09 000:35.759 NIL       
 14:22:13 000:37.116 TIMIT     
 14:22:15 000:37.654 *** Garbage collection starting   
 14:22:19 000:38.757 *** GC 20: time 490168 ms 
 14:22:23 000:39.279 *** 85219 recovered, 22464 stable, 2221 active, 1 free    
 14:22:27 000:39.799   
 14:22:31 000:40.319 ------------------------------------------------------------      
 14:22:34 000:40.841 FFT Test  
 14:22:37 000:41.361 Timing performed on CRAY  
 14:22:39 000:41.891 23-apr-84 12:00:00.       
 14:22:41 000:42.411 ***** Non-numeric argument in arithmetic  
 14:22:43 000:42.933 ***** Continuable error: retry form is `(MINUS (QUOTE NIL))'      
 14:22:45 000:43.454 Break loop
 14:22:47 000:43.975 3 lisp break>> (exitlisp) 
 14:22:51 000:44.774 *timepsl / 5 1.1  
 14:22:53 000:45.300  Timed Psl, No-Date-Yet   
 14:22:56 000:45.820 1 lisp> (off usermode pgwd plap pcode)    
 14:23:10 000:46.862   
 14:23:12 000:47.381 NIL       
 14:23:14 000:47.901 2 lisp> (dskin "timepuzz")
 14:23:22 000:49.880 Should be loading (USEFUL COMMON NUMERIC-OPERATORS TIMER) 
 14:23:23 000:50.401 NIL       
 14:23:27 000:50.952 NIL       
 14:23:37 000:51.486 NIL       
 14:23:40 000:52.022 NIL       
 14:23:46 000:52.566 NIL       
 14:24:07 000:54.359 NIL       
 14:24:09 000:55.007 NIL       
 14:24:11 000:55.583 511       
 14:24:13 000:56.126 3 
 14:24:18 000:56.669 12
 14:24:21 000:57.226 8 
 14:24:27 000:58.342 NIL       
 14:24:29 000:59.230 NIL       
 14:24:34 001:00.503 NIL       
 14:24:38 001:02.155 NIL       
 14:24:43 001:03.843 NIL       
 14:24:45 001:04.406 NIL       
 14:24:48 001:05.173 *P*       
 14:24:50 001:06.090 FIT       
 14:24:52 001:06.783 *** Garbage collection starting   
 14:24:56 001:08.068 *** GC 19: time 643440 ms 
 14:24:58 001:08.590 *** 117958 recovered, 22234 stable, 9808 active, 1 free   
 14:25:00 001:09.353 PLACE     
 14:25:04 001:10.473 REMOVE    
 14:25:07 001:11.823 TRIAL     
 14:25:10 001:12.886 DEFINEPIECE       
 14:25:15 001:15.132 START     
 14:25:17 001:15.768 *** Garbage collection starting   
 14:25:21 001:17.040 *** GC 20: time 633084 ms 
 14:25:27 001:17.562 *** 116793 recovered, 31480 stable, 1727 active, 1 free   
 14:25:30 001:18.691 TIMIT     
 14:25:32 001:19.364 *** Garbage collection starting   
 14:25:35 001:20.258 *** GC 21: time 314584 ms 
 14:25:36 001:20.780 *** 32057 recovered, 31484 stable, 14 active, 1 free      
 14:25:39 001:21.300   
 14:25:41 001:21.821 ------------------------------------------------------------      
 14:25:53 001:22.341 Puzzle test       
 14:25:57 001:22.861 Timing performed on CRAY  
 14:26:03 001:24.577 23-apr-84 12:00:00.       
 14:26:05 001:25.097 success in 2005 trials    
 14:26:07 001:25.617   
 14:26:09 001:26.137 ........................................  
 14:26:11 001:26.658 Cpu (- GC) Time = 1007.13500000 secs      
 14:26:14 001:27.178 Elapsed Time = 0. secs    
 14:26:18 001:27.699 GC Time = 0. secs 
 14:26:20 001:28.219 Load Average Before  = 0  
 14:26:36 001:28.739 Load Average After   = 0  
 14:26:40 001:29.260 Average Load Average = 0. 
 14:26:45 001:29.781 ------------------------------------------------------------      
 14:26:47 001:30.302 NIL       
 14:26:58 001:30.876 NIL       
 14:27:00 001:31.395 3 lisp> (exitlisp)
 14:27:02 001:32.195 *timepsl / 5 1.1  
 14:27:07 001:32.720  Timed Psl, No-Date-Yet   
 14:27:11 001:33.241 1 lisp> (off usermode pgwd plap pcode)    
 14:27:14 001:34.283   
 14:27:16 001:34.802 NIL       
 14:27:18 001:35.322 2 lisp> (dskin "timetria")
 14:27:24 001:36.757 Should be loading (TIMER USEFUL FAST-VECTORS)     
 14:27:26 001:37.277 NIL       
 14:27:28 001:37.835 NIL       
 14:27:30 001:38.537 NIL       
 14:27:32 001:39.113 NIL       
 14:27:34 001:39.694 [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 
 14:27:36 001:40.225 0 
 14:27:38 001:40.775 [0 0 0 0 0 0 0 0 0 0 0 0 0 0]     
 14:27:40 001:41.318 [1 2 4 3 5 6 1 3 6 2 5 4 11 12 13 7 8 4 4 7 11 8 12 13 6 10 15 9 14 13    
 14:27:41 001:41.840 13 14 15 9 10 6]  
 14:27:43 001:42.401 [2 4 7 5 8 9 3 6 10 5 9 8 12 13 14 8 9 5 2 4 7 5 8 9 3 6 10 5 9 8 12 13   
 14:27:45 001:42.922 14 8 9 5] 
 14:27:47 001:43.492 [4 7 11 8 12 13 6 10 15 9 14 13 13 14 15 9 10 6 1 2 4 3 5 6 1 3 6 2 5 4   
 14:27:49 001:44.014 11 12 13 7 8 4]   
 14:27:51 001:44.667 *** (LAST-POSITION): base 524227, length 11 words 
 14:27:53 001:45.188 LAST-POSITION     
 14:27:57 001:46.143 *** Garbage collection starting   
 14:28:02 001:47.429 *** GC 19: time 644885 ms 
 14:28:03 001:47.951 *** 123232 recovered, 22234 stable, 4534 active, 1 free   
 14:28:06 001:48.733 *** (TRY): base 524250, length 80 words   
 14:28:11 001:49.253 TRY       
 14:28:14 001:49.900 *** (GOGOGO): base 524372, length 17 words
 14:28:15 001:50.421 GOGOGO    
 14:28:20 001:51.621 *** (TIMIT): base 524415, length 206 words
 14:28:22 001:52.142 TIMIT     
 14:28:24 001:52.805 *** (TEST): base 524735, length 21 words  
 14:28:26 001:53.326 TEST      
 14:28:28 001:53.865 *** Garbage collection starting   
 14:28:30 001:54.843 *** GC 20: time 385086 ms 
 14:28:32 001:55.364 *** 59455 recovered, 22464 stable, 398 active, 1 free     
 14:28:36 001:55.885   
 14:28:38 001:56.405 ------------------------------------------------------------      
 14:28:40 001:56.926 Triang test       
 14:29:10 001:57.446 Timing performed on CRAY  
 14:29:53 002:15.231 23-apr-84 12:00:00.       
 14:29:55 002:15.752 ........................................  
 14:29:57 002:16.273 Cpu (- GC) Time = 1.45426160e+04 secs     
 14:29:58 002:16.794 Elapsed Time = 0. secs    
 14:30:00 002:17.314 GC Time = 0. secs 
 14:30:02 002:17.834 Load Average Before  = 0  
 14:30:03 002:18.354 Load Average After   = 0  
 14:30:05 002:18.875 Average Load Average = 0. 
 14:30:07 002:19.396 ------------------------------------------------------------      
 14:30:09 002:19.917 NIL       
 14:30:16 002:20.508 NIL       
 14:30:19 002:21.028 3 lisp> (exitlisp)
 14:30:53 002:21.885 *log      
  Wayne
-------

∂30-Apr-84  1402	KESSLER@UTAH-20.ARPA 	Benchmarks   
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 30 Apr 84  14:02:20 PDT
Date: Mon 30 Apr 84 15:02:45-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Benchmarks
To: Griss@UTAH-20.ARPA, jwa@LANL.ARPA
cc: rpg@SU-AI.ARPA, Frank@UTAH-20.ARPA

I've edited it in.  I don't know if this is what RPG does, or not.  I just
took the times and added it to the table.

Bob.
-------
                          GABRIEL BENCHMARKS

RPG: Wed 28 Mar 84 07:19:00-MST
RRK: 30-April-84  0816 PST, with CRAY
MLG: 4:04pm  Saturday, 28 April 1984

            | KL-10b |   20-60 | 20-60  | 3600   |  20-60  |  CRAY
Benchmark   |MACLISP |InterLisp| PSL 3.2| ZetaL  | PSL 3.3 | PSL 3.2
---------------------------------------------------------------------
Boyer       |  6.47  |  25.458 | 11.74  |   12   |  11.87  | 1.853
Browse      | 13.64  |  70.321 | 23.82  |  32.1  |  23.06  | 4.677
Destruct    |  2.16  |   9.206 | 2.381  |  3.94  |   1.85  | 0.451
Traverse    |        |         |        |        |         |
 Initialize |  6.69  |  37.62  |  7.596 |  12.3  |  6.368  |
 Traverse   | 23.96  | 85.862  | 43.226 | 51.23  | 34.381  |
Tak         | 0.489  |  2.088  | 0.4662 |  0.59  |   .478  | 0.045
STak        |  3.5   |  6.379  | 2.682  |  2.48  |  2.685  | 1.112
CTak        |  2.85  | 44.672  | 2.958  |  8.84  |  2.995  | 0.606
Takl        |  2.81  |  3.786  | 2.521  |  6.32  |  2.533  | 0.307
Takr        | 0.488  |  2.162  | 0.609  |  0.59  |  0.61   | 0.069
Deriv       |  1.81  |  40.21  | 5.384  | 11.55  |  5.49   | 1.280
DDeriv      |  2.83  |  28.067 |  6.04  |  14.6  |  6.61   | 1.422
Fdderiv     |  2.15  |    -    |   -    |   -    |         |
Div2  
 Iterative  | 0.844  | 131.858 |   2.3  |  4.8   |  2.205  | 0.581
 Recursive  |  1.28  |  68.208 |   2.34 |  6.27  |  2.246  | 0.575
FFT         |  4.0   |   12.6  | 35.517 |  4.77  | 37.9    |
Puzzle      |  7.87  | 121.028 | 15.92  | 14.21  |  4.285  | 1.007
Triang      | 86.03  |2326.439 | 86.574 | 158.1  | 78.347  | 14.54
Fprint      |  0.78  |  4.451  | 4.665  |  3.1   | 4.636   | 3.618
Fread       |  0.98  |  4.476  | 5.725  |  6.25  | 5.131   | 20.41
Tprint      |  0.81  |  4.727  | 4.351  |  7.8   | 4.083   | 0.190
Frpoly, power = 2    
 r=x+y+z+1  | 5.0E-3 |    -    | 0.023  | 9.0E-3 | 0.021   | 1.3E-3
 r2=1000*r  | 5.0E-3 |    -    | 0.039  | 0.017  | 0.044   | 
 r3=r,flonum| 4.0E-3 |    -    | 0.025  | 9.0E-3 | 0.025   | 1.9E-3
Frpoly, power = 5    
 r=x+y+z+1  | 0.042  |    -    | 0.093  |  0.88  | 0.108   | 0.013
 r2=1000*r  | 0.067  |    -    | 0.425  |  0.26  | 0.358   | 
 r3=r,flonum| 0.042  |    -    | 0.136  | 0.094  | 0.166   | 0.020
Frpoly, power = 10   
 r=x+y+z+1  | 0.467  |    -    | 0.921  |  1.01  | 0.897   | 0.142
 r2=1000*r  | 0.926  |    -    | 6.46   |  5.1   | 6.199   | 
 r3=r,flonum|  0.47  |    -    | 1.509  |  1.07  | 1.603   | 0.229
Frpoly, power = 15   
 r=x+y+z+1  |  3.15  |    -    | 12.68  |  6.4   | 12.268  | 0.942
 r2=1000*r  |  9.43  |    -    | 68.195 |  50.3  | 67.636  | 
 r3=r,flonum|  3.16  |    -    | 11.138 |  6.8   | 11.514  | 1.606
--------------------------------------------------------------------


HP-20 is latest PSL 3.3, extended addressing.
SAIL is MacLisp on a dec10 (kl10b)
interlisp on a 2060
Symbolics 3600 with a lot of memory.
CRAY is PSL 3.2, LANL on Cray XMP
-------

---------------------
Martin, why don't we have HP times?  It would be really interesting!!!!!

Bob.
-------

∂14-May-84  1250	KESSLER@UTAH-20.ARPA 	Vax timings  
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 14 May 84  12:50:13 PDT
Date: Mon 14 May 84 13:50:34-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Vax timings
To: rpg@SU-AI.ARPA

We've run your timing tests on our Vaxen, both 750 and 780.  A few
caveats:  1) Will didn't include real time, oh well, maybe next month.
 2) ON the 750, the destructure test (I think) bombed in the middle.  Will
reran it, and its results were spliced into the middle.
 3) You can get some idea for load averages from the w commands that were
issued at the top.
 4) The 780 tests were run by themselves, while the 750 had other users.

I've edited out the cruft, to make it easier to grab the numbers.

I've got our Apollo wizard running them on the Apollo, so should get them to
you in a day or so.

If you need anything else let me know.

Bob.
-------

∂14-May-84  1251	KESSLER@UTAH-20.ARPA 	750 timing tests  
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 14 May 84  12:50:41 PDT
Date: Mon 14 May 84 13:51:02-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: 750 timing tests
To: rpg@SU-AI.ARPA

  5:01am  up 30 days, 17:03,  2 users,  load average: 3.72, 2.71, 2.18
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         5:45   2:29  tn utah-20 
Uharpo   ttydd     1:57am        38:45   4:55  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Boyer Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 9146 ms, 133342 recovered, 133344 free
*** Garbage collection starting
*** GC 7: time 15419 ms, 99512 recovered, 99511 free
*** Garbage collection starting
*** GC 8: time 16150 ms, 81540 recovered, 81542 free

........................................
Cpu (- GC) Time = 43.384 secs
Elapsed Time = 0.0 secs
GC Time = 40.715 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:17am  up 30 days, 17:19,  2 users,  load average: 3.07, 3.09, 2.81
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         6:16   3:00  tn utah-20 
Uharpo   ttydd     1:57am        42:29   5:36  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
browse
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 3094 ms, 182986 recovered, 182988 free
*** Garbage collection starting
*** GC 7: time 3315 ms, 183046 recovered, 183045 free
*** Garbage collection starting
*** GC 8: time 3451 ms, 183068 recovered, 183070 free
*** Garbage collection starting
*** GC 9: time 3145 ms, 183122 recovered, 183121 free
*** Garbage collection starting
*** GC 10: time 3077 ms, 183186 recovered, 183188 free

........................................
Cpu (- GC) Time = 75.497 secs
Elapsed Time = 0.0 secs
GC Time = 16.082 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:24am  up 30 days, 17:26,  2 users,  load average: 2.33, 2.83, 2.81
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am     1   6:30   3:14  tn utah-20 
Uharpo   ttydd     1:57am        43:34   5:44  rsh utah-gr rnews 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
DDeriv Test, also same as FDDeriv
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1887 ms, 191698 recovered, 191700 free
*** Garbage collection starting
*** GC 7: time 2006 ms, 191672 recovered, 191671 free

........................................
Cpu (- GC) Time = 19.448 secs
Elapsed Time = 0.0 secs
GC Time = 3.893 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  8:00pm  up 32 days,  8:02,  1 users,  load average: 0.70, 0.26, 0.16
User     tty       login@  idle   JCPU   PCPU  what
galway   ttyp0     7:47pm     3   1:16     54  -csh 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Deriv Test.
Timing performed on VAX/Unix
12-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1887 ms, 191650 recovered, 191652 free
*** Garbage collection starting
*** GC 7: time 1989 ms, 191654 recovered, 191653 free

........................................
Cpu (- GC) Time = 15.368 secs
Elapsed Time = 0.0 secs
GC Time = 3.876 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
% Note, due to a bug, this one was run at a different time.
  8:01pm  up 32 days,  8:03,  0 users,  load average: 0.99, 0.45, 0.23
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
`Destructive' Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 7.225 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:30am  up 30 days, 17:32,  2 users,  load average: 3.09, 2.82, 2.78
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         6:38   3:22  tn utah-20 
Uharpo   ttydd     1:57am        44:46   5:51  rsh utah-gr rnews 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Div test 1, iterative version of div
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 2091 ms, 191226 recovered, 191227 free

........................................
Cpu (- GC) Time = 8.024 secs
Elapsed Time = 0.0 secs
GC Time = 2.091 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 7: time 2448 ms, 48851 recovered, 191322 free

------------------------------------------------------------
Div test 2, recursive version of div
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 8: time 2788 ms, 191226 recovered, 191227 free

........................................
Cpu (- GC) Time = 7.293 secs
Elapsed Time = 0.0 secs
GC Time = 2.788 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:34am  up 30 days, 17:36,  3 users,  load average: 3.95, 3.34, 3.01
User     tty       login@  idle   JCPU   PCPU  what
howard   ttyh9     5:30am           26     15  ex BORONEXT.DAT 
peterson ttyi8     1:47am         6:52   3:36  tn utah-20 
Uharpo   ttydd     1:57am        45:35   6:00  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
FFT Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 2873 ms, 183197 recovered, 183198 free
*** Garbage collection starting
*** GC 7: time 2737 ms, 183192 recovered, 183191 free
*** Garbage collection starting
*** GC 8: time 2839 ms, 183189 recovered, 183192 free
*** Garbage collection starting
*** GC 9: time 3383 ms, 183198 recovered, 183197 free
*** Garbage collection starting
*** GC 10: time 3570 ms, 183186 recovered, 183189 free
*** Garbage collection starting
*** GC 11: time 2873 ms, 183192 recovered, 183191 free
*** Garbage collection starting
*** GC 12: time 3859 ms, 183189 recovered, 183192 free
*** Garbage collection starting
*** GC 13: time 3230 ms, 183192 recovered, 183191 free
*** Garbage collection starting
*** GC 14: time 2720 ms, 183183 recovered, 183186 free

........................................
Cpu (- GC) Time = 141.729 secs
Elapsed Time = 0.0 secs
GC Time = 28.084 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:48am  up 30 days, 17:50,  3 users,  load average: 4.03, 3.54, 3.27
User     tty       login@  idle   JCPU   PCPU  what
howard   ttyh9     5:30am         1:51   1:36  -h -c ls *.DAT 
peterson ttyi8     1:47am         7:22   4:06  tn utah-20 
Uharpo   ttydd     1:57am        48:37   6:38  rsh utah-gr rnews 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Fprint Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 5.338 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:52am  up 30 days, 17:54,  2 users,  load average: 4.51, 3.73, 3.39
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         7:34   4:18  tn utah-20 
Uharpo   ttydd     1:57am        49:24   6:50  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
fread test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 7.548 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:55am  up 30 days, 17:57,  2 users,  load average: 3.65, 3.68, 3.43
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         7:43   4:27  tn utah-20 
Uharpo   ttydd     1:57am        50:21   6:58  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Puzzle test
Timing performed on VAX/Unix
11-May-84 12:00:00.
success in 2005 trials

........................................
Cpu (- GC) Time = 35.938 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  6:01am  up 30 days, 18:03,  2 users,  load average: 3.43, 3.44, 3.39
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         7:45   4:29  tn utah-20 
Uharpo   ttydd     1:57am        52:04   7:16  rsh utah-gr rnews 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
tprint test
Timing performed on VAX/Unix
11-May-84 12:00:00.((((((!678E !567D !567D !456D !456D !345C) !234B (!567D 
........................................
Cpu (- GC) Time = 4.284 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  6:05am  up 30 days, 18:07,  2 users,  load average: 3.67, 3.64, 3.49
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         7:58   4:42  tn utah-20 
Uharpo   ttydd     1:57am        53:20   7:25  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Traverse init Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 35.53 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** (TIMIT): base 1147575, length 1051 bytes
TIMIT
*** Garbage collection starting
*** GC 6: time 10013 ms, 41511 recovered, 140819 free

------------------------------------------------------------
Traverse Test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 185.657 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  6:23am  up 30 days, 18:24,  3 users,  load average: 3.24, 3.56, 3.56
User     tty       login@  idle   JCPU   PCPU  what
peterson ttyi8     1:47am         8:30     14  readnews -n net.micro 
Uharpo   ttydd     1:57am        58:19   8:25  -uucico 
Ubeesvax ttyde     6:14am           26     26  -uucico 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Triang test
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 523.192 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  6:44am  up 30 days, 18:46,  3 users,  load average: 3.62, 3.45, 3.26
User     tty       login@  idle   JCPU   PCPU  what
Uharpo   ttydd     1:57am        64:55   9:26  rsh utah-gr rnews 
Ubeesvax ttyde     6:14am         1:11   1:11  -uucico 
carter   ttyp0     6:35am     8   2:47   2:43  liszt 
PSL 3.2, 27-Apr-84
------------------------------------------------------------
TAK: Takai test, (TAK 18 12 6)
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 1.802 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 6: time 3213 ms, 77 recovered, 191727 free

------------------------------------------------------------
TAK: Takai test, (tak 10018 10012 10006)
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 1.819 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
TAKL: Takai test with lists
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 15.453 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
TAKR: Takai test--Gross Version with Lots of functions
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 2.822 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
STAK: Takai test using fluid binding
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 17.782 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
CTAK: Takai test using catch and throw
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 13.583 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  6:59am  up 30 days, 19:01,  3 users,  load average: 3.52, 3.50, 3.42
User     tty       login@  idle   JCPU   PCPU  what
Uharpo   ttydd     1:57am        69:33  10:09  -uucico 
Ubeesvax ttyde     6:14am         1:44   1:44  -uucico 
carter   ttyp0     6:35am    23   7:32   7:28  liszt 
PSL 3.2, 27-Apr-84
|||||||||||||||FPOLY benchmark, N = 2|||||||||||||||
*** Garbage collection starting
*** GC 5: time 1989 ms, 148253 recovered, 191067 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.068 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 6: time 2346 ms, 393 recovered, 191088 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.289 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 7: time 3502 ms, 875 recovered, 191087 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.119 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 5|||||||||||||||
*** Garbage collection starting
*** GC 8: time 2448 ms, 488 recovered, 191088 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.408 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 9: time 2278 ms, 2101 recovered, 191087 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 1.581 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 10: time 2618 ms, 7065 recovered, 191088 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.493 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 10|||||||||||||||
*** Garbage collection starting
*** GC 11: time 2125 ms, 3357 recovered, 191087 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 4.539 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 12: time 2992 ms, 18641 recovered, 191088 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 27.778 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 13: time 2635 ms, 94104 recovered, 191087 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
11-May-84 12:00:00.
........................................
Cpu (- GC) Time = 7.191 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 15|||||||||||||||
*** Garbage collection starting
*** GC 14: time 2397 ms, 35156 recovered, 191088 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 15: time 3162 ms, 186442 recovered, 186441 free

........................................
Cpu (- GC) Time = 81.668 secs
Elapsed Time = 0.0 secs
GC Time = 3.162 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 16: time 2975 ms, 126116 recovered, 191088 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 17: time 3791 ms, 178294 recovered, 178309 free
*** Garbage collection starting
*** GC 18: time 4267 ms, 173193 recovered, 173202 free
*** Garbage collection starting
*** GC 19: time 3196 ms, 170378 recovered, 170388 free
*** Garbage collection starting
*** GC 20: time 5100 ms, 167695 recovered, 167708 free
*** Garbage collection starting
*** GC 21: time 5916 ms, 166318 recovered, 166324 free

........................................
Cpu (- GC) Time = 394.179 secs
Elapsed Time = 0.0 secs
GC Time = 22.27 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 22: time 3655 ms, 99603 recovered, 191088 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
11-May-84 12:00:00.
*** Garbage collection starting
*** GC 23: time 3638 ms, 182755 recovered, 182756 free

........................................
Cpu (- GC) Time = 61.115 secs
Elapsed Time = 0.0 secs
GC Time = 3.638 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
3 lisp> NIL
4 lisp> Exiting lisp
  7:56am  up 30 days, 19:58,  3 users,  load average: 3.53, 3.85, 4.17
User     tty       login@  idle   JCPU   PCPU  what
filesave console   7:09am    26  14:44     41  dump 5dltufs 1600 1 /dev/rmt12 1
Uharpo   ttydd     1:57am        83:30  11:49  rsh utah-gr rnews 
galway   ttyp1     7:41am     4     48     26  -csh 
-------

∂14-May-84  1253	KESSLER@UTAH-20.ARPA 	780 timing tests  
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 14 May 84  12:52:37 PDT
Date: Mon 14 May 84 13:51:42-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: 780 timing tests
To: rpg@SU-AI.ARPA

Note, these are both PSL version 3.2.
---------------
----- Vax 780 running PSL 3.2 ------  
  5:00am  up 6 days, 13:18,  0 users,  load average: 1.60, 1.08, 0.67
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Boyer Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 4845 ms, 133342 recovered, 133343 free
*** Garbage collection starting
*** GC 7: time 7021 ms, 99512 recovered, 99512 free
*** Garbage collection starting
*** GC 8: time 8109 ms, 81540 recovered, 81541 free

........................................
Cpu (- GC) Time = 21.301 secs
Elapsed Time = 0.0 secs
GC Time = 19.975 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:06am  up 6 days, 13:24,  0 users,  load average: 2.03, 1.73, 1.10
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
browse
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1819 ms, 183014 recovered, 183015 free
*** Garbage collection starting
*** GC 7: time 1819 ms, 183086 recovered, 183086 free
*** Garbage collection starting
*** GC 8: time 1853 ms, 183122 recovered, 183123 free
*** Garbage collection starting
*** GC 9: time 1819 ms, 183096 recovered, 183096 free
*** Garbage collection starting
*** GC 10: time 1819 ms, 183200 recovered, 183201 free

........................................
Cpu (- GC) Time = 41.157 secs
Elapsed Time = 0.0 secs
GC Time = 9.129 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:09am  up 6 days, 13:27,  0 users,  load average: 2.01, 1.86, 1.26
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
DDeriv Test, also same as FDDeriv
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1071 ms, 191698 recovered, 191699 free
*** Garbage collection starting
*** GC 7: time 1037 ms, 191672 recovered, 191672 free

........................................
Cpu (- GC) Time = 10.234 secs
Elapsed Time = 0.0 secs
GC Time = 2.108 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:10am  up 6 days, 13:28,  0 users,  load average: 1.91, 1.87, 1.31
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Deriv Test.
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1054 ms, 191714 recovered, 191715 free
*** Garbage collection starting
*** GC 7: time 1054 ms, 191688 recovered, 191688 free

........................................
Cpu (- GC) Time = 8.585 secs
Elapsed Time = 0.0 secs
GC Time = 2.108 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:11am  up 6 days, 13:29,  0 users,  load average: 1.43, 1.75, 1.30
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
`Destructive' Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 3.876 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:11am  up 6 days, 13:30,  0 users,  load average: 1.24, 1.67, 1.29
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Div test 1, iterative version of div
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1105 ms, 191226 recovered, 191228 free

........................................
Cpu (- GC) Time = 3.808 secs
Elapsed Time = 0.0 secs
GC Time = 1.105 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 7: time 1088 ms, 48851 recovered, 191343 free

------------------------------------------------------------
Div test 2, recursive version of div
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 8: time 1071 ms, 191226 recovered, 191228 free

........................................
Cpu (- GC) Time = 3.757 secs
Elapsed Time = 0.0 secs
GC Time = 1.071 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:12am  up 6 days, 13:31,  0 users,  load average: 1.11, 1.57, 1.27
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
FFT Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 6: time 1496 ms, 183218 recovered, 183219 free
*** Garbage collection starting
*** GC 7: time 1496 ms, 183213 recovered, 183212 free
*** Garbage collection starting
*** GC 8: time 1496 ms, 183210 recovered, 183213 free
*** Garbage collection starting
*** GC 9: time 1479 ms, 183219 recovered, 183218 free
*** Garbage collection starting
*** GC 10: time 1513 ms, 183210 recovered, 183213 free
*** Garbage collection starting
*** GC 11: time 1496 ms, 183213 recovered, 183212 free
*** Garbage collection starting
*** GC 12: time 1479 ms, 183210 recovered, 183213 free
*** Garbage collection starting
*** GC 13: time 1479 ms, 183213 recovered, 183212 free
*** Garbage collection starting
*** GC 14: time 1513 ms, 183210 recovered, 183213 free

........................................
Cpu (- GC) Time = 60.554 secs
Elapsed Time = 0.0 secs
GC Time = 13.447 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:14am  up 6 days, 13:33,  0 users,  load average: 1.02, 1.39, 1.24
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Fprint Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 2.193 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:15am  up 6 days, 13:33,  0 users,  load average: 1.20, 1.40, 1.25
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
fread test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 3.366 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:15am  up 6 days, 13:34,  0 users,  load average: 1.11, 1.35, 1.24
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Puzzle test
Timing performed on VAX/Unix
14-May-84 12:00:00.
success in 2005 trials

........................................
Cpu (- GC) Time = 16.286 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:16am  up 6 days, 13:35,  0 users,  load average: 1.07, 1.30, 1.23
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
tprint test
Timing performed on VAX/Unix
14-May-84 12:00:00.((((((!678E !567D !567D !456D !456D !345C) !234B (!567D 
........................................
Cpu (- GC) Time = 1.921 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:17am  up 6 days, 13:36,  0 users,  load average: 1.09, 1.28, 1.22
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Traverse init Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 15.062 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** (TIMIT): base 1147571, length 1051 bytes
TIMIT
*** Garbage collection starting
*** GC 6: time 4505 ms, 41511 recovered, 140840 free

------------------------------------------------------------
Traverse Test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 72.352 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:19am  up 6 days, 13:38,  0 users,  load average: 1.03, 1.20, 1.20
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
Triang test
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 212.194 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:24am  up 6 days, 13:42,  0 users,  load average: 1.00, 1.10, 1.16
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
------------------------------------------------------------
TAK: Takai test, (TAK 18 12 6)
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.833 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 6: time 1054 ms, 77 recovered, 191748 free

------------------------------------------------------------
TAK: Takai test, (tak 10018 10012 10006)
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.816 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
TAKL: Takai test with lists
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 5.27 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
TAKR: Takai test--Gross Version with Lots of functions
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 1.173 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
STAK: Takai test using fluid binding
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 7.106 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
CTAK: Takai test using catch and throw
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 5.389 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
  5:26am  up 6 days, 13:44,  0 users,  load average: 1.01, 1.08, 1.14
User     tty       login@  idle   JCPU   PCPU  what
PSL 3.2, 27-Apr-84
|||||||||||||||FPOLY benchmark, N = 2|||||||||||||||
*** Garbage collection starting
*** GC 5: time 1156 ms, 145109 recovered, 191088 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.0 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 6: time 1122 ms, 395 recovered, 191109 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.068 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 7: time 1122 ms, 875 recovered, 191108 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.034 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 5|||||||||||||||
*** Garbage collection starting
*** GC 8: time 1122 ms, 488 recovered, 191109 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.204 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 9: time 1122 ms, 2101 recovered, 191108 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.901 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 10: time 1139 ms, 7065 recovered, 191109 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 0.289 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 10|||||||||||||||
*** Garbage collection starting
*** GC 11: time 1139 ms, 3357 recovered, 191108 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 2.006 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 12: time 1139 ms, 18641 recovered, 191109 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 13.022 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 13: time 1122 ms, 94104 recovered, 191108 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
14-May-84 12:00:00.
........................................
Cpu (- GC) Time = 3.06 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 15|||||||||||||||
*** Garbage collection starting
*** GC 14: time 1122 ms, 35156 recovered, 191109 free

------------------------------------------------------------
FPOLY test 1
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 15: time 1394 ms, 186446 recovered, 186446 free

........................................
Cpu (- GC) Time = 35.479 secs
Elapsed Time = 0.0 secs
GC Time = 1.394 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 16: time 1122 ms, 126112 recovered, 191109 free

------------------------------------------------------------
FPOLY test 2
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 17: time 1564 ms, 178295 recovered, 178310 free
*** Garbage collection starting
*** GC 18: time 1632 ms, 173193 recovered, 173195 free
*** Garbage collection starting
*** GC 19: time 1785 ms, 170418 recovered, 170429 free
*** Garbage collection starting
*** GC 20: time 1802 ms, 167737 recovered, 167749 free
*** Garbage collection starting
*** GC 21: time 1887 ms, 166237 recovered, 166255 free

........................................
Cpu (- GC) Time = 161.619 secs
Elapsed Time = 0.0 secs
GC Time = 8.67 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 22: time 1122 ms, 99589 recovered, 191109 free

------------------------------------------------------------
FPOLY test 3
Timing performed on VAX/Unix
14-May-84 12:00:00.
*** Garbage collection starting
*** GC 23: time 1564 ms, 182767 recovered, 182767 free

........................................
Cpu (- GC) Time = 21.845 secs
Elapsed Time = 0.0 secs
GC Time = 1.564 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
3 lisp> NIL
4 lisp> Exiting lisp
  5:31am  up 6 days, 13:50,  0 users,  load average: 1.00, 1.04, 1.11
User     tty       login@  idle   JCPU   PCPU  what
-------

∂15-May-84  0921	Cassels@SCRC-STONY-BROOK.ARPA 	FFT benchmark 
Received: from SCRC-STONY-BROOK.ARPA by SU-AI.ARPA with TCP; 15 May 84  09:21:21 PDT
Received: from SCRC-CUYAHOGA by SCRC-STONY-BROOK via CHAOS with CHAOS-MAIL id 32955; Tue 15-May-84 12:20:48-EDT
Date: Tue, 15 May 84 12:16 EDT
From: dlw@SCRC-RIVERSIDE.ARPA
Sender: Cassels@SCRC-RIVERSIDE.ARPA
Subject: FFT benchmark
To: rpg@SU-AI.ARPA
Cc: lisp-designers@SCRC-RIVERSIDE.ARPA

The official time for the FFT benchmark that you got from BEE
is 4.77.  Due to minor performance tweaking, by the way, the
current time is 4.74.  Anyway, running the same program with
the FPA gets the time 3.53.

∂15-May-84  1220	KESSLER@UTAH-20.ARPA 	DN600   
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 15 May 84  12:20:15 PDT
Date: Tue 15 May 84 09:36:56-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: DN600
To: rpg@SU-AI.ARPA

1 lisp> (dskin "time←boyer.sl")
------------------------------------------------------------
Boyer Test
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 9771 ms, 131358 recovered, 131359 free
*** Garbage collection starting
*** GC 3: time 15497 ms, 97980 recovered, 97980 free
*** Garbage collection starting
*** GC 4: time 15916 ms, 82632 recovered, 82633 free

........................................
Cpu (- GC) Time = 43.7 secs
Elapsed Time = 0.0 secs
GC Time = 41.184 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
2 lisp> (exitlisp)
------------------------------------------------------------
browse
Timing performed on Apollo
access violation  (OS/fault handler)
$ (exitlisp)
------------------------------------------------------------
DDeriv Test, also same as FDDeriv
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 2581 ms, 188862 recovered, 188863 free
*** Garbage collection starting
*** GC 3: time 2643 ms, 188864 recovered, 188864 free

........................................
Cpu (- GC) Time = 27.296 secs
Elapsed Time = 0.0 secs
GC Time = 5.224 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
Deriv Test.
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 2733 ms, 188850 recovered, 188851 free
*** Garbage collection starting
*** GC 3: time 2582 ms, 188846 recovered, 188846 free

........................................
Cpu (- GC) Time = 26.692 secs
Elapsed Time = 0.0 secs
GC Time = 5.315 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
`Destructive' Test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 10.591 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
------------------------------------------------------------
Div test 1, iterative version of div
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 2626 ms, 188502 recovered, 188504 free

........................................
Cpu (- GC) Time = 13.205 secs
Elapsed Time = 0.0 secs
GC Time = 2.626 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 3: time 2600 ms, 51653 recovered, 188535 free

------------------------------------------------------------
Div test 2, recursive version of div
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 4: time 2636 ms, 188475 recovered, 188476 free

........................................
Cpu (- GC) Time = 6.704 secs
Elapsed Time = 0.0 secs
GC Time = 2.636 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
FFT Test
Timing performed on Apollo
14-May-1984 12:00:00.
***** Overflow occurred in 32 Bit Multiply
Break loop
2 lisp break>> (exitlisp)
------------------------------------------------------------
Fprint Test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 4.43 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
fread test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 6.525 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.35 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 3: time 2847 ms, 420 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.142 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 4: time 2679 ms, 895 recovered, 188242 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.42 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 5|||||||||||||||
*** Garbage collection starting
*** GC 5: time 2787 ms, 508 recovered, 188241 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.293 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 6: time 2684 ms, 2157 recovered, 188242 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.775 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 7: time 2985 ms, 7272 recovered, 188241 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.443 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 10|||||||||||||||
*** Garbage collection starting
*** GC 8: time 2685 ms, 3427 recovered, 188242 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 3.601 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 9: time 2685 ms, 18767 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 33.333 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 10: time 2681 ms, 98084 recovered, 188242 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 5.691 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 15|||||||||||||||
*** Garbage collection starting
*** GC 11: time 2689 ms, 35198 recovered, 188213 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 12: time 3218 ms, 183498 recovered, 183500 free

........................................
Cpu (- GC) Time = 75.108 secs
Elapsed Time = 0.0 secs
GC Time = 3.218 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 13: time 3171 ms, 168005 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 14: time 3763 ms, 174177 recovered, 174194 free
*** Garbage collection starting
*** GC 15: time 4756 ms, 169980 recovered, 169989 free
*** Garbage collection starting
*** GC 16: time 4584 ms, 167111 recovered, 167125 free
*** Garbage collection starting
*** GC 17: time 4238 ms, 165573 recovered, 165584 free
*** Garbage collection starting
*** GC 18: time 6315 ms, 159628 recovered, 159630 free

........................................
Cpu (- GC) Time = 485.284 secs
Elapsed Time = 0.0 secs
GC Time = 23.656 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 19: time 2708 ms, 162727 recovered, 188241 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 20: time 4884 ms, 179596 recovered, 179598 free

........................................
Cpu (- GC) Time = 44.203 secs
Elapsed Time = 0.0 secs
GC Time = 4.884 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
Puzzle test
Timing performed on Apollo
14-May-1984 12:00:00.
success in 2005 trials

........................................
Cpu (- GC) Time = 28.923 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
%%%%%%%%%%%%%%%%
%
% THIS FILE APPEARS TO BE A HACK TO GET THE CRAY TO WORK...
%
%%%%%%%%%%%%%%%%
1 lisp> (dskin "time←setup.sl")

NIL
*** Function `LOAD' has been redefined
LOAD
***** Couldn't open file `backquote.sl'
***** Continuable error: retry form is `(DSKIN (QUOTE "backquote.sl"))'
Break loop
2 lisp break>> (exitlisp)
------------------------------------------------------------
TAK: Takai test, (TAK 18 12 6)
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.657 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
TAK: Takai test, (tak 10018 10012 10006)
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.649 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
TAKL: Takai test with lists
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 10.923 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
TAKR: Takai test--Gross Version with Lots of functions
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 2.131 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
STAK: Takai test using fluid binding
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 18.683 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
CTAK: Takai test using catch and throw
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 12.259 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
tprint test
Timing performed on Apollo
14-May-1984 12:00:00.((((((!678E !567D !567D !456D !456D !345C) !234B (!567D 
!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
  :
  :
OPQ8 OPQ8 MNO7) KLM6 (QRS9 OPQ8 OPQ8 MNO7 MNO7 KLM6) IJK5) GHI4) EFG3) CDE2) 
ABC1)

........................................
Cpu (- GC) Time = 2.731 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
1 lisp> (dskin "time←traverse.sl")

NIL
NIL
NIL
NIL
NIL
odd address error  (OS/fault handler)
$ (exitlisp)
------------------------------------------------------------
Triang test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 416.47 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
-------

∂15-May-84  1219	KESSLER@UTAH-20.ARPA 	[John W. Peterson <JW-Peterson@UTAH-20.ARPA>: First crack at timing apollo tests]  
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 15 May 84  12:19:04 PDT
Date: Tue 15 May 84 09:36:13-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: [John W. Peterson <JW-Peterson@UTAH-20.ARPA>: First crack at timing apollo tests]
To: rpg@SU-AI.ARPA

The following messages will contain what we have been able to run on the
Apollos.  The first batch is the dn300 and the second is the dn600.  The
characteristics of each machine is included in the following messages.  Note,
there were some problems, but I thought that I'd get them off to you
asap.

Let me know if there is anything else that you need.
Bob.
p.s. These are also PSL 3.1, compared to 3.2 on the other machines.
                ---------------

   1) 14-May John W. Peterson     First crack at timing apollo tests
   2) 15-May John W. Peterson     putah:apollo←dn300←times.log

Message 1 -- ************************
Mail-From: JW-PETERSON created at 14-May-84 23:29:43
Date: Mon 14 May 84 23:29:43-MDT
From: John W. Peterson <JW-Peterson@UTAH-20.ARPA>
Subject: First crack at timing apollo tests
To: kesSLER@UTAH-20.ARPA
cc: galwAY@UTAH-20.ARPA

A first crack at timing tests is in putah:apollo←dn600←times.log.
Lowlights:
  browse blew up with access violation (car of NIL?)
  FFT blew up with "overflow in 32 bit multiply"
  time←traverse died with an odd address error; Nstruct is
     probably broken

The other tests appeared to work fine.  Any tools around for
tabulating the output?  I'd like to see how it compares to 
other machines...

The full log is in //b/apollo/psl←test/test←results.

Machine specs:
  apollo Dn600- Dual 68000, 10Mhz, 2Mb memory, 4K high speed
  	cache.

  apollo Dn300- 68010, 10Mhz, 1.5Mb, no wait states
	(no cache needed)

I'll run the Dn300 test tonight.
-------

Message 2 -- ************************
Mail-From: JW-PETERSON created at 15-May-84 02:33:15
Date: Tue 15 May 84 02:33:15-MDT
From: John W. Peterson <JW-Peterson@UTAH-20.ARPA>
Subject: putah:apollo←dn300←times.log
To: kesSLER@UTAH-20.ARPA
cc: galway@UTAH-20.ARPA

...has the tests for the 300.

Deriv Test, Div Test, and FPOLY 2 all blew up with reference
to illegal address; most likely they loaded .b files that were
never rebuilt for the Dn300...  
-------
-------

∂15-May-84  1220	KESSLER@UTAH-20.ARPA 	Dn300   
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 15 May 84  12:19:39 PDT
Date: Tue 15 May 84 09:36:41-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Dn300
To: rpg@SU-AI.ARPA

  #### PSL TIMING TEST ####
------------------------------------------------------------
Boyer Test
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 11726 ms, 131358 recovered, 131359 free
*** Garbage collection starting
*** GC 3: time 16948 ms, 97980 recovered, 97980 free
*** Garbage collection starting
*** GC 4: time 19911 ms, 82632 recovered, 82633 free

........................................
Cpu (- GC) Time = 46.924 secs
Elapsed Time = 0.0 secs
GC Time = 48.585 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
browse
Timing performed on Apollo
access violation  (OS/fault handler)
$ (exitlisp)
? exitlisp : program not found
------------------------------------------------------------
DDeriv Test, also same as FDDeriv
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 3019 ms, 188862 recovered, 188863 free
*** Garbage collection starting
*** GC 3: time 3226 ms, 188864 recovered, 188864 free

........................................
Cpu (- GC) Time = 28.953 secs
Elapsed Time = 0.0 secs
GC Time = 6.245 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
Deriv Test.
Timing performed on Apollo
14-May-1984 12:00:00.
*** Garbage collection starting
*** GC 2: time 2803 ms, 188850 recovered, 188851 free
reference to illegal address  (OS/MST manager)
$ (exitlisp)
? exitlisp : program not found
------------------------------------------------------------
`Destructive' Test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 10.161 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
------------------------------------------------------------
Div test 1, iterative version of div
Timing performed on Apollo
reference to illegal address  (OS/MST manager)
$ (exitlisp)
? exitlisp : program not found
$ 
------------------------------------------------------------
FFT Test
Timing performed on Apollo
14-May-1984 12:00:00.
***** Overflow occurred in 32 Bit Multiply
Break loop
2 lisp break>> (exitlisp)
$ 
------------------------------------------------------------
Fprint Test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 4.191 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
------------------------------------------------------------
fread test
Timing performed on Apollo
14-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 6.47 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
|||||||||||||||FPOLY benchmark, N = 2|||||||||||||||
*** Garbage collection starting
*** GC 2: time 2761 ms, 50542 recovered, 188252 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.4 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 3: time 2749 ms, 420 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.13 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 4: time 2731 ms, 895 recovered, 188242 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.43 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 5|||||||||||||||
*** Garbage collection starting
*** GC 5: time 2731 ms, 508 recovered, 188241 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.298 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 6: time 2756 ms, 2157 recovered, 188242 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.634 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 7: time 2731 ms, 7272 recovered, 188241 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 0.445 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 10|||||||||||||||
*** Garbage collection starting
*** GC 8: time 2750 ms, 3427 recovered, 188242 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 3.456 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 9: time 2730 ms, 18767 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 32.684 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 10: time 2731 ms, 98084 recovered, 188242 free

------------------------------------------------------------
FPOLY test 3
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 5.617 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
|||||||||||||||FPOLY benchmark, N = 15|||||||||||||||
*** Garbage collection starting
*** GC 11: time 2734 ms, 35198 recovered, 188213 free

------------------------------------------------------------
FPOLY test 1
Timing performed on Apollo
15-May-1984 12:00:00.
*** Garbage collection starting
*** GC 12: time 3279 ms, 183498 recovered, 183500 free

........................................
Cpu (- GC) Time = 70.273 secs
Elapsed Time = 0.0 secs
GC Time = 3.279 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
*** Garbage collection starting
*** GC 13: time 2878 ms, 168041 recovered, 188241 free

------------------------------------------------------------
FPOLY test 2
Timing performed on Apollo
reference to illegal address  (OS/MST manager)
$ (exitlisp)
------------------------------------------------------------
Puzzle test
Timing performed on Apollo
15-May-1984 12:00:00.
success in 2005 trials

........................................
Cpu (- GC) Time = 31.743 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------

------------------------------------------------------------
TAK: Takai test, (TAK 18 12 6)
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.622 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
*** Garbage collection starting
*** GC 2: time 2611 ms, 146 recovered, 188940 free

------------------------------------------------------------
TAK: Takai test, (tak 10018 10012 10006)
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.614 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
*** (LISTN): base 16#51B580, length 10#40 bytes
LISTN
*** (MAS): base 16#51B5C0, length 10#156 bytes
MAS
*** (SHORTERP): base 16#51B65C, length 10#60 bytes
SHORTERP
NIL
NIL
NIL
(18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1)
(12 11 10 9 8 7 6 5 4 3 2 1)
(6 5 4 3 2 1)
*** (TIMIT): base 16#51B6B0, length 10#1026 bytes
TIMIT
*** Garbage collection starting
*** GC 3: time 2612 ms, 21861 recovered, 188855 free

------------------------------------------------------------
TAKL: Takai test with lists
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 12.9 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------

------------------------------------------------------------
TAKR: Takai test--Gross Version with Lots of functions
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 1.755 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
NIL
*** (TAK): base 16#51EF50, length 10#38 bytes
TAK
*** (STAK): base 16#51EF78, length 10#236 bytes
STAK
NIL
NIL
NIL
*** (TIMIT): base 16#51F064, length 10#1036 bytes
TIMIT
*** Garbage collection starting
*** GC 7: time 2635 ms, 18818 recovered, 188855 free

------------------------------------------------------------
STAK: Takai test using fluid binding
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 19.44 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
NIL
*** (TAK): base 16#51F470, length 10#88 bytes
TAK
*** (TAK1): base 16#51F4C8, length 10#244 bytes
TAK1
NIL
NIL
*** (TIMIT): base 16#51F5BC, length 10#1038 bytes
TIMIT
*** Garbage collection starting
*** GC 8: time 2710 ms, 23572 recovered, 188856 free

------------------------------------------------------------
CTAK: Takai test using catch and throw
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 12.433 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
2 lisp> (exitlisp)
$ 

------------------------------------------------------------
tprint test
Timing performed on Apollo
15-May-1984 12:00:00.((((((!678E !567D !567D !456D !456D !345C) !234B (!567D 
!456D !456D !345C !345C !234B) !123A (!456D !345C !345C !234B !234B !123A) 
 :
BC1)

........................................
Cpu (- GC) Time = 2.724 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
2 lisp> (exitlisp)
$ 
$ psl300
 Portable Standard LISP
1 lisp> (dskin "time←traverse.sl")

NIL
NIL
NIL
NIL
NIL
odd address error  (OS/fault handler)
$ (exitlisp)
? exitlisp : program not found
------------------------------------------------------------
Triang test
Timing performed on Apollo
15-May-1984 12:00:00.
........................................
Cpu (- GC) Time = 439.902 secs
Elapsed Time = 0.0 secs
GC Time = 0.0 secs
Load Average Before  = 0
Load Average After   = 0
Average Load Average = 0.0
------------------------------------------------------------
NIL
NIL
2 lisp> (exitlisp)
$ 
-------

∂18-May-84  1212	jkf%ucbmike@Berkeley 	benchmarks   
Received: from UCB-VAX.ARPA by SU-AI.ARPA with TCP; 18 May 84  12:12:23 PDT
Received: from ucbmike.ARPA by UCB-VAX.ARPA (4.24/4.27)
	id AA14706; Fri, 18 May 84 12:12:49 pdt
Date: Fri, 18 May 84 12:12:54 pdt
From: John Foderaro (on an sun) <jkf%ucbmike@Berkeley>
Message-Id: <8405181912.2328@ucbmike.ARPA>
Received: by ucbmike.ARPA (4.24ucb/3.5)
	id AA02328; Fri, 18 May 84 12:12:54 pdt
To: rpg@su-ai
Subject: benchmarks
Cc: fateman%ucbmike@Berkeley

 I'll be sending along a batch of benchmarks run in 'franz' on a Sun 2 (68010).
 

∂18-May-84  1215	jkf%ucbkim@Berkeley 
Received: from UCB-VAX.ARPA by SU-AI.ARPA with TCP; 18 May 84  12:15:09 PDT
Received: from ucbkim.ARPA by UCB-VAX.ARPA (4.24/4.27)
	id AA14787; Fri, 18 May 84 12:15:24 pdt
Received: by ucbkim.ARPA (4.24/4.27)
	id AA21240; Fri, 18 May 84 12:15:30 pdt
Date: Fri, 18 May 84 12:15:30 pdt
From: John Foderaro <jkf%ucbkim@Berkeley>
Message-Id: <8405181915.AA21240@ucbkim.ARPA>
To: rpg@su-ai

--- Benchmark tak run on franz at Fri May 18 08:17:56 PDT 1984 by jkf
--- cpu usage: 8:17am up 2:35, 4 users, load average: 1.83, 1.50, 1.20
Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl tak.o]
t
-> benchmark: test (file tak) , tranlinks: on, localf: no
executing form: (tak 18 12 6)
begin (127 299)
end (332 299)
runs 1
avg cpu time 3.416666666666667

benchmark: test (file tak) , tranlinks: off, localf: no
executing form: (tak 18 12 6)
begin (334 299)
end (1113 299)
runs 1
avg cpu time 12.98333333333333

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl tak-l.o]
t
-> benchmark: test (file tak) , tranlinks: on, localf: yes
executing form: (tak 18 12 6)
begin (125 302)
end (266 302)
runs 1
avg cpu time 2.35

benchmark: test (file tak) , tranlinks: off, localf: yes
executing form: (tak 18 12 6)
begin (267 302)
end (413 302)
runs 1
avg cpu time 2.433333333333333

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [load tak.l]
[load benchmac.l]
t
-> benchmark: test (file tak) , tranlinks: on, interpreted 
executing form: (tak 18 12 6)
begin (155 300)
end (5747 300)
runs 1
avg cpu time 93.2

benchmark: test (file tak) , tranlinks: off, interpreted 
executing form: (tak 18 12 6)
begin (5750 300)
end (11479 300)
runs 1
avg cpu time 95.48333333333333

nil
-> --- cpu usage: 8:25am up 2:43, 4 users, load average: 1.99, 1.80, 1.50
--- end of benchmark tak

∂18-May-84  1215	jkf%ucbkim@Berkeley 
Received: from UCB-VAX.ARPA by SU-AI.ARPA with TCP; 18 May 84  12:15:22 PDT
Received: from ucbkim.ARPA by UCB-VAX.ARPA (4.24/4.27)
	id AA14790; Fri, 18 May 84 12:15:25 pdt
Received: by ucbkim.ARPA (4.24/4.27)
	id AA21238; Fri, 18 May 84 12:15:28 pdt
Date: Fri, 18 May 84 12:15:28 pdt
From: John Foderaro <jkf%ucbkim@Berkeley>
Message-Id: <8405181915.AA21238@ucbkim.ARPA>
To: rpg@su-ai

--- Benchmark frpoly run on franz at Thu May 17 19:16:26 PDT 1984 by jkf
--- cpu usage: 7:16pm up 4 mins, 3 users, load average: 1.09, 0.65, 0.26
Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl frpoly.o]
t
-> benchmark: test (file frpoly) , tranlinks: on, localf: no
executing form: (frp-bench)
(power= 2 (0.05 0.0 0.05) (0.05 0.0 0.05) (0.05 0.0 0.05))
(power= 5 (0.36667 0.0 0.36667) (0.8 0.0 0.8) (0.48333 0.0 0.48333))
(power= 10 (4.7167 0.0 4.7167) (17.883 2.1667 15.717) (9.95 4.25 5.7))
(power= 15 (39.15 4.4833 34.667) (211.33 30.833 180.5) (61.433 20.55 40.883))
begin (93 246)
end (20962 3983)
runs 1
avg cpu time 285.53, avg gc time 62.283

benchmark: test (file frpoly) , tranlinks: off, localf: no
executing form: (frp-bench)
(power= 2 (0.1 0.0 0.1) (0.1 0.0 0.1) (0.083333 0.0 0.083333))
(power= 5 (0.9 0.0 0.9) (1.3167 0.0 1.3167) (0.98333 0.0 0.98333))
(power= 10 (10.433 0.0 10.433) (21.35 0.0 21.35) (11.433 0.0 11.433))
(power= 15 (79.817 6.8 73.017) (240.32 21.65 218.67) (96.95 17.233 79.717))
begin (20965 3983)
end (48889 6724)
runs 1
avg cpu time 419.72, avg gc time 45.683

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl frpoly-l.o]
t
-> benchmark: test (file frpoly) , tranlinks: on, localf: yes
executing form: (frp-bench)
(power= 2 (0.016667 0.0 0.016667) (0.033333 0.0 0.033333) (0.05 0.0 0.05))
(power= 5 (0.33333 0.0 0.33333) (0.73333 0.0 0.73333) (0.43333 0.0 0.43333))
(power= 10 (4.1667 0.0 4.1667) (17.15 2.0833 15.067) (9.3667 4.2 5.1667))
(power= 15 (35.517 4.4 31.117) (207.5 30.733 176.77) (58.133 20.533 37.6))
begin (95 252)
end (20194 3969)
runs 1
avg cpu time 273.03, avg gc time 61.95

benchmark: test (file frpoly) , tranlinks: off, localf: yes
executing form: (frp-bench)
(power= 2 (0.033333 0.0 0.033333) (0.05 0.0 0.05) (0.05 0.0 0.05))
(power= 5 (0.45 0.0 0.45) (0.85 0.0 0.85) (0.53333 0.0 0.53333))
(power= 10 (5.6333 0.0 5.6333) (16.533 0.0 16.533) (6.65 0.0 6.65))
(power= 15 (49.467 6.7667 42.7) (209.98 21.683 188.3) (66.367 17.183 49.183))
begin (20197 3969)
end (41677 6707)
runs 1
avg cpu time 312.37, avg gc time 45.633

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [load frpoly.l]
[load benchmac.l]
t
-> benchmark: test (file frpoly) , tranlinks: on, interpreted 
executing form: (frp-bench)
(power= 2 (0.56667 0.0 0.56667) (0.56667 0.0 0.56667) (0.55 0.0 0.55))
(power= 5 (5.9833 0.0 5.9833) (6.4 0.0 6.4) (6.0667 0.0 6.0667))
(power= 10 (67.85 0.0 67.85) (81.05 2.2833 78.767) (73.85 4.4667 69.383))
(power= 15 (487.75 4.7333 483.02) (664.17 35.417 628.75) (514.52 21.917 492.6))
begin (146 248)
end (114814 4377)
runs 1
avg cpu time 1842.3, avg gc time 68.817

benchmark: test (file frpoly) , tranlinks: off, interpreted 
executing form: (frp-bench)
(power= 2 (0.53333 0.0 0.53333) (0.56667 0.0 0.56667) (0.55 0.0 0.55))
(power= 5 (5.95 0.0 5.95) (6.4 0.0 6.4) (6.1 0.0 6.1))
(power= 10 (67.883 0.0 67.883) (78.783 0.0 78.783) (69.317 0.0 69.317))
(power= 15 (490.08 7.0833 483.0) (653.9 22.617 631.28) (513.12 18.167 494.95))
begin (114819 4377)
end (228518 7249)
runs 1
avg cpu time 1847.1, avg gc time 47.867

nil
-> --- cpu usage: 8:49pm up 1:37, 4 users, load average: 1.61, 1.32, 1.05
--- end of benchmark frpoly

∂18-May-84  1216	jkf%ucbkim@Berkeley 
Received: from UCB-VAX.ARPA by SU-AI.ARPA with TCP; 18 May 84  12:15:38 PDT
Received: from ucbkim.ARPA by UCB-VAX.ARPA (4.24/4.27)
	id AA14785; Fri, 18 May 84 12:15:41 pdt
Received: by ucbkim.ARPA (4.24/4.27)
	id AA21234; Fri, 18 May 84 12:15:23 pdt
Date: Fri, 18 May 84 12:15:23 pdt
From: John Foderaro <jkf%ucbkim@Berkeley>
Message-Id: <8405181915.AA21234@ucbkim.ARPA>
To: rpg@su-ai

--- Benchmark dderiv run on franz at Fri May 18 07:33:22 PDT 1984 by jkf
--- cpu usage: 7:33am up 1:51, 4 users, load average: 0.87, 1.08, 1.14
Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl dderiv.o]
t
-> benchmark: test (file deriv) , tranlinks: on, localf: no
executing form: (run)
begin (130 299)
end (4156 3265)
runs 1
avg cpu time 17.66666666666667, avg gc time 49.43333333333333

benchmark: test (file deriv) , tranlinks: off, localf: no
executing form: (run)
begin (4160 3265)
end (9650 6237)
runs 1
avg cpu time 41.96666666666667, avg gc time 49.53333333333333

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl dderiv-l.o]
t
-> benchmark: test (file deriv) , tranlinks: on, localf: yes
executing form: (run)
begin (130 297)
end (4016 3257)
runs 1
avg cpu time 15.43333333333333, avg gc time 49.33333333333334

benchmark: test (file deriv) , tranlinks: off, localf: yes
executing form: (run)
begin (4020 3257)
end (8121 6212)
runs 1
avg cpu time 19.1, avg gc time 49.25

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [load dderiv.l]
[load benchmac.l]
t
-> benchmark: test (file deriv) , tranlinks: on, interpreted 
executing form: (run)
begin (161 306)
end (18791 4307)
runs 1
avg cpu time 243.8166666666667, avg gc time 66.68333333333334

benchmark: test (file deriv) , tranlinks: off, interpreted 
executing form: (run)
begin (18796 4307)
end (37436 8309)
runs 1
avg cpu time 243.9666666666667, avg gc time 66.7

nil
-> --- cpu usage: 7:49am up 2:07, 4 users, load average: 1.00, 1.00, 1.00
--- end of benchmark dderiv

∂18-May-84  1215	jkf%ucbkim@Berkeley 
Received: from UCB-VAX.ARPA by SU-AI.ARPA with TCP; 18 May 84  12:15:20 PDT
Received: from ucbkim.ARPA by UCB-VAX.ARPA (4.24/4.27)
	id AA14786; Fri, 18 May 84 12:15:23 pdt
Received: by ucbkim.ARPA (4.24/4.27)
	id AA21236; Fri, 18 May 84 12:15:25 pdt
Date: Fri, 18 May 84 12:15:25 pdt
From: John Foderaro <jkf%ucbkim@Berkeley>
Message-Id: <8405181915.AA21236@ucbkim.ARPA>
To: rpg@su-ai

--- Benchmark deriv run on franz at Fri May 18 07:49:50 PDT 1984 by jkf
--- cpu usage: 7:49am up 2:07, 4 users, load average: 1.00, 1.00, 1.00
Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl deriv.o]
t
-> benchmark: test (file deriv) , tranlinks: on, localf: no
executing form: (run)
begin (133 301)
end (3840 3088)
runs 1
avg cpu time 15.33333333333333, avg gc time 46.45

benchmark: test (file deriv) , tranlinks: off, localf: no
executing form: (run)
begin (3843 3088)
end (9010 5880)
runs 1
avg cpu time 39.58333333333334, avg gc time 46.53333333333333

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [fasl deriv-l.o]
t
-> benchmark: test (file deriv) , tranlinks: on, localf: yes
executing form: (run)
begin (121 297)
end (3682 3090)
runs 1
avg cpu time 12.8, avg gc time 46.55

benchmark: test (file deriv) , tranlinks: off, localf: yes
executing form: (run)
begin (3686 3090)
end (7479 5883)
runs 1
avg cpu time 16.66666666666667, avg gc time 46.55

nil
-> Franz Lisp, Opus 40.03
(c) copyright 1984 by Franz Inc., Oakland Ca.
-> [load deriv.l]
[load benchmac.l]
t
-> benchmark: test (file deriv) , tranlinks: on, interpreted 
executing form: (run)
begin (161 301)
end (18082 4015)
runs 1
avg cpu time 236.7833333333333, avg gc time 61.9

benchmark: test (file deriv) , tranlinks: off, interpreted 
executing form: (run)
begin (18087 4015)
end (36148 7865)
runs 1
avg cpu time 236.85, avg gc time 64.16666666666667

nil
-> --- cpu usage: 8:06am up 2:24, 4 users, load average: 1.15, 1.17, 1.00
--- end of benchmark deriv

∂22-May-84  1152	EJG@S1-A.ARPA 	DESTRUCTIVE better & better   
Received: from S1-A.ARPA by SU-AI.ARPA with TCP; 22 May 84  11:52:15 PDT
Date: 22 May 84  1149 PDT
From: Erik Gilbert <EJG@S1-A.ARPA>
Subject: DESTRUCTIVE better & better  
To:   rpg@SU-AI.ARPA   

I tried it again with the official Mark IIA debugging stopwatch and got
0.91 seconds per iteration (same test as before) repeatably!!

∂23-May-84  1640	EJG@S1-A.ARPA 	S-1 TAKL time  
Received: from S1-A.ARPA by SU-AI.ARPA with TCP; 23 May 84  16:40:37 PDT
Date: 23 May 84  1636 PST
From: Erik Gilbert <EJG@S1-A.ARPA>
Subject: S-1 TAKL time
To:   rpg@SU-AI.ARPA
CC:   PMF@S1-A.ARPA  

[this is for 100]
(TESTER) for TAKL takes 330.7 seconds by the stopwatch.  Two questions
come to mind about the code generated for SHORTERP in TAKL:

(1) How come the (or (null x) ...) generates the sequence CMPSF, indexed MOV
(indexed by the CMPSF result!), and then SKP.EQL, instead of just a single skip?

(2) How come the recursive call to SHORTERP isn't removed as a tail recursion?

(refer to: {sail} TAKL.LSP[nil,s1], TAKL.LAP[nil,s1];
       or: {s1-a} TAKL.LSP[1,ejg], TAKL.LAP[1,ejg])

New time:
2.92

If shorterp is defined with a DO, then it is 1.18. Alas, no cheating allowed.
∂01-Jun-84  1521	CL.BOYER@UTEXAS-20.ARPA 	[Bill Murray <ATP.Murray@UTEXAS-20.ARPA>: Dandelion/3600 Benchmark Results]
Received: from UTEXAS-20.ARPA by SU-AI.ARPA with TCP; 1 Jun 84  15:19:52 PDT
Date: Fri 1 Jun 84 17:20:35-CDT
From:  Bob Boyer <CL.BOYER@UTEXAS-20.ARPA>
Subject: [Bill Murray <ATP.Murray@UTEXAS-20.ARPA>: Dandelion/3600 Benchmark Results]
To: rpg@SU-AI.ARPA

Thought you might be interested in these runs.  When
are we going to get the fruits of your investigations?
                ---------------

Mail-From: ATP.MURRAY created at  1-Jun-84 14:29:07
Date: Fri 1 Jun 84 14:29:07-CDT
From: Bill Murray <ATP.Murray@UTEXAS-20.ARPA>
Subject: Dandelion/3600 Benchmark Results
To: cl.boyer@UTEXAS-20.ARPA


   Sorry, I've been so long about this.  If you want more
benchmarks or different functions just let me know.


	  Results of Running Benchmarks on 3600 and Dandelion


[NOTE: ALL 3600 TIMES WITH GARBAGE COLLECTOR OFF.  ALL CODE, BOTH MACHINES, WAS INTERPRETED.]

                           Time/fn call on 3600     Time/fn call on Dandelion    Ratio Dandelion time/3600 time

(EAT-CONS-CELLS)           0.1850556 seconds        2.5836 seconds               13.96121

(TRIANGLE-N 100)           0.19296041 seconds       1.9674 seconds               10.195873

(WORKOUT 50)               2.80084 seconds          21.3133 seconds               7.60961


Since (EAT-CONS-CELLS) does little more than CONS up 100 cells, the following estimates result:

                           3600        Dandelion

Time Per CONS              1.85 ms     25.84 ms

Since (TRIANGLE-N 100) does little more than perform 100 fixnum additions, the following estimates result:

                           3600        Dandelion

Time per fixnum addition:  1.93 ms     19.67 ms


;;;Data and Code from which these results were produced:

;;;INTERLISP CODE and TIMING RESULTS

;;; Cons-allocation benchmark kernel: resets the free variable X to NIL,
;;; then conses 100 fresh cells onto it -- exercising CONS and nothing else.
;;; (X is deliberately a free/global variable so the cells stay live.)
(DEFINEQ
  (EAT-CONS-CELLS ()
		  (SETQ X NIL)
		  (FOR I FROM 1 TO 100 DO (SETQ X (CONS NIL X)))))

;;;(TIME FUNCTION-TO-EXECUTE NUMBER-OF-TIMES DATA-TO-TIME-SWITCH)

(TIME (EAT-CONS-CELLS) 10 3)

1000/10 = 100 CONSES                 ;;per function call of EAT-CONS-CELLS
25.836/10 = 2.5836 SECONDS CPU TIME

(TIME (EAT-CONS-CELLS) 10 0)

1000/10 = 100 CONSES
25.842 SECONDS REAL TIME	     ;;total, for all 10 function calls.

;;; Fixnum-addition benchmark: sums 1..N via PLUS (N additions), prints
;;; the Nth triangular number, and returns it.
(DEFINEQ (TRIANGLE-N (N)
		     (PROG (SUM-SO-FAR)
			   (SETQ SUM-SO-FAR 0)
			   (FOR I FROM 1 TO N DO
				(SETQ SUM-SO-FAR
				      (PLUS SUM-SO-FAR I)))
			   (PRINTOUT T "SUM IS" SUM-SO-FAR)
			   (RETURN SUM-SO-FAR))))

(TIME (TRIANGLE-N 100) 10 3)

0 CONSES
19.674/10 = 1.9674 SECONDS
0 SECONDS GC

(TIME (TRIANGLE-N 100) 10 0)

0 CONSES
19.681/10 = 1.9681 SECONDS
19.685 SECONDS REAL TIME

;;; Builds and returns an N-entry association list pairing each integer
;;; with itself, in descending key order: ((N . N) ... (2 . 2) (1 . 1)).
;;; Allocates 2N cons cells.
(DEFINEQ (CONS-ALIST (N)
		     (PROG (RESULT)
			   (SETQ RESULT NIL)
			   (FOR I FROM 1 TO N DO
				(SETQ RESULT
				      (CONS (CONS I I) RESULT)))
			   (RETURN RESULT))))

;;; Linear association-list search: returns the value paired with KEY
;;; (compared with EQUAL), or NIL when ALIST is exhausted.
;;; NOTE(review): the transcript read (CONS ((NULL ALIST) NIL) ...),
;;; an obvious typo for COND -- as written it is not a conditional at
;;; all.  The Zetalisp version of this same benchmark later in the
;;; message uses COND, confirming the intended form.
(DEFINEQ (LOOKUP (KEY ALIST)
		 (COND ((NULL ALIST) NIL)
		       ((EQUAL (CAAR ALIST) KEY) (CDAR ALIST))
		       (T (LOOKUP KEY (CDR ALIST))))))

;;; Alist-search benchmark: builds an N-entry self-keyed alist with
;;; CONS-ALIST, then sums the values LOOKUP finds for keys 1..N and
;;; returns the sum.  The N linear lookups make the whole run O(N^2)
;;; by design.
(DEFINEQ (WORKOUT (N)
		  (PROG (ALIST SUM-SO-FAR)
			(SETQ ALIST (CONS-ALIST N))
			(SETQ SUM-SO-FAR 0)
			(FOR I FROM 1 TO N DO
			     (SETQ SUM-SO-FAR
				   (PLUS (LOOKUP I ALIST) SUM-SO-FAR)))
			(RETURN SUM-SO-FAR))))

(TIME (WORKOUT 50) 10 3)

1000/10 = 100 CONSES
213.133/10 = 21.3133 SECONDS
0 TIME GC

(TIME (WORKOUT 50) 10 0)
1000/10 = 100 CONSES
213.055/10 = 21.3055 SECONDS
213.06 SECONDS REAL TIME

;;;CORRESPONDING ZETALISP CODE and TIMING RESULTS

;; Timing harness: interprets SEXP with EVAL N times, measuring elapsed
;; wall-clock time with the Lisp Machine microsecond clock, and prints
;; the total and per-evaluation seconds.
;; NOTE: // is Zetalisp division; dividing by 1e6 converts microseconds
;; to (floating) seconds.  Results go to *standard-output* via FORMAT;
;; nothing useful is returned.
(defun time-fn (sexp n &aux init-time final-time elapsed-time-total)
  (format t "~%Timing ~d evaluations of ~a.~%" n sexp)
  (setq init-time (time:microsecond-time))
  (loop for i from 1 to n do (eval sexp))
  (setq final-time (time:microsecond-time))
  (setq elapsed-time-total (// (- final-time init-time) 1e6))
  (format t "~%Total elapsed time: ~d seconds" elapsed-time-total)
  (format t "~%Total elapsed time per evaluation of ~a: ~d seconds" sexp (// elapsed-time-total n)))

;; Cons-allocation benchmark: clears the free variable X, then pushes
;; 100 fresh cells onto it -- exercising CONS and nothing else.
(defun eat-cons-cells ()
  (setq x nil)
  (do ((remaining 100 (1- remaining)))
      ((zerop remaining))
    (setq x (cons nil x))))

(time-fn '(eat-cons-cells) 10)
Timing 10 evaluations of (EAT-CONS-CELLS).

Total elapsed time: 1.850556 seconds
Total elapsed time per evaluation of (EAT-CONS-CELLS): 0.1850556 seconds

;; Fixnum-addition benchmark: computes 1+2+...+N with PLUS (N additions),
;; prints the total, and returns it.
(defun triangle-n (n)
  (let ((total 0))
    (do ((i 1 (1+ i)))
	((> i n))
      (setq total (plus total i)))
    (format t "sum is ~d" total)
    total))

(time-fn '(triangle-n 100) 10)
Timing 10 evaluations of (TRIANGLE-N 100.).
sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050sum is 5050
Total elapsed time: 1.929604 seconds
Total elapsed time per evaluation of (TRIANGLE-N 100): 0.19296041 seconds

;; Builds and returns an N-entry association list pairing each integer
;; with itself, in descending key order: ((N . N) ... (2 . 2) (1 . 1)).
(defun cons-alist (n)
  (let ((pairs nil))
    (do ((i 1 (1+ i)))
	((> i n))
      (setq pairs (cons (cons i i) pairs)))
    pairs))

;; Linear association-list search: returns the value paired with KEY
;; (compared with EQUAL), or NIL when ALIST is exhausted.
(defun lookup (key alist)
  (if (null alist)
      nil
      (if (equal (caar alist) key)
	  (cdar alist)
	  (lookup key (cdr alist)))))

;; Alist-search benchmark: builds an N-entry self-keyed alist with
;; CONS-ALIST, then sums the values LOOKUP finds for keys 1..N and
;; returns that sum (i.e. the Nth triangular number, found the slow way).
(defun workout (n)
  (let ((table (cons-alist n))
	(acc 0))
    (do ((i 1 (1+ i)))
	((> i n))
      (setq acc (plus (lookup i table) acc)))
    acc))

(time-fn '(workout 50) 10)
Timing 10 evaluations of (WORKOUT 50.).

Total elapsed time: 28.0084 seconds
Total elapsed time per evaluation of (WORKOUT 50.): 2.80084 seconds

- Regards,
      Bill
-------
-------

∂12-Jun-84  0006	EJG@S1-A.ARPA 	S-1 DERIV time 
Received: from S1-A.ARPA by SU-AI.ARPA with TCP; 12 Jun 84  00:06:02 PDT
Date: 12 Jun 84  0006 PST
From: Erik Gilbert <EJG@S1-A.ARPA>
Subject: S-1 DERIV time
To:   rpg@SU-AI.ARPA   

So, DERIV was easy.  I had to kluge in a definition of MAPCAR (which
unfortunately does an NCONS, since it uses APPLY (is there a FUNCALL
lurking somewhere?)), but other than that DERIV.LSP[NIL,S1] worked
the first time out.  Timing (RUN) produces either two or three "allocating
segment" messages (size 16), and runs for 4.998 seconds if two
and 5.13 seconds if three.

This is better than half a Cray-1, if I read your charts correctly!

∂09-Jun-84  0114	EJG@S1-A.ARPA 	S-1 STAK time  
Received: from S1-A.ARPA by SU-AI.ARPA with TCP; 9 Jun 84  01:14:00 PDT
Date: 09 Jun 84  0110 PDT
From: Erik Gilbert <EJG@S1-A.ARPA>
Subject: S-1 STAK time 
To:   rpg@SU-AI.ARPA
CC:   PMF@S1-A.ARPA  

So, I patched around the EXCH instruction in SPECIAL-LOOKUP and
(TAK 18 12 6) now gets the right answer.  The official time
reported by EVALT is 4.31 seconds (also for (tester) 100 iteration
version).

Keep those cards and benchmarks coming in!  (Which ones next, coach?)

∂18-Jun-84  2043	GSB@MIT-ML 	timings 
Received: from MIT-ML.ARPA by SU-AI.ARPA with TCP; 18 Jun 84  20:35:43 PDT
Date: 18 June 1984 23:35 EDT
From: Glenn S. Burke <GSB @ MIT-ML>
Subject: timings
To: rpg @ SU-AI

Sunguroff found some anomalies between some of your sail times and
those he was doing on mc in parallel with ml.  From the few i
glanced at, it appears to be those with lots of gc time.  Were you
running with a gc-daemon?  I don't believe that the gc-daemon run
time gets charged to the gc-time, but even if it did, ken church,
when doing some benchmarking on bill martin's parser, found that
the time to do interrupt signalling in maclisp which called the gc-daemon
was measurable.

Yes, there is a GC-daemon in use. Which direction is the anomaly?
Would it help if you also knew that the Lisp I used is a full 256k
image running on a 3 megaword KL10? That is, no paging, no swapping.
			-rpg-

∂20-Jun-84  1828	GSB@MIT-ML 	gc-daemon timing  
Received: from MIT-ML.ARPA by SU-AI.ARPA with TCP; 20 Jun 84  18:27:48 PDT
Date: 20 June 1984 21:23 EDT
From: Glenn S. Burke <GSB @ MIT-ML>
Subject: gc-daemon timing
To: rpg @ SU-AI

I don't think the no-paging/no-swapping is all that significant in
comparison;  i believe ITS gives moderately consistent runtime figures
irrespective of the load (not perfect, but i don't think it's that far
off).

I really have forgotten all the details about this problem.  The error
is that runtime on behalf of running the gc-daemon is not being ascribed
to the gc-time.  I don't know whether just a measurable fraction of
it is being left out of the gc-time, or if ALL of it is being left out.
The GC runs the gc-daemon using maclisp's user-interrupt queuing mechanism.
Even if Maclisp, in calling the gc-daemon, were to charge that runtime
to the gc-time, i believe that the runtime of getting into there to
actually run the thing would be noticeable.  Without digging up the
times you gave Sunguroff and his timings, i couldn't say for sure,
however my suspicion is that in your case the entire time of the
gc-daemon is being counted as your runtime.  Are you using the
Baker gc-daemon?  If so, what version?  Version 14 here has the code
in it which attempts to charge its own runtime to the gctime, but
there is still significant entry/exit overhead which it misses.

∂26-Jun-84  1323	vanroggen%bach.DEC@decwrl.ARPA 	vax lisp on 11/7xx
Received: from DECWRL.ARPA by SU-AI.ARPA with TCP; 26 Jun 84  13:22:26 PDT
Received: from DEC-RHEA.ARPA by decwrl.ARPA (4.22.01/4.7.31)
	id AA01424; Tue, 26 Jun 84 13:22:39 pdt
Message-Id: <8406262022.AA01424@decwrl.ARPA>
Date: Tuesday, 26 Jun 1984 13:21:59-PDT
From: vanroggen%bach.DEC@decwrl.ARPA
To: rpg@su-ai, vanroggen%bach.DEC@decwrl.ARPA
Subject: vax lisp on 11/7xx


Name of		VAXLisp		VAXLisp		VAXLisp		VAXLisp
application	U1.0-20		U1.0-20		U1.0-20		U1.0-20
Benchmark	VMS V3.5	VMS V3.5	VMS V3.5	VMS V3.6
		730 (No FPA)	750 (No FPA)	780		785 (No FPA)
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------

Boyer		258.98		69.38		46.79		30.36
		(GC 180.82)	(GC 79.30)	(GC 40.90)	(GC 28.38)

-------------------------------------------------------------------------------

Browse		540.58		195.11		118.51		73.30
		(GC 380.50)	(GC 164.05)	(GC 86.50)	(GC 59.30)

-------------------------------------------------------------------------------

Deriv		62.54		24.50		13.76		9.65
		(GC 116.96)	(GC 49.63)	(GC 26.20)	(GC 17.81)

-------------------------------------------------------------------------------

Destru		26.41		11.30		6.38		4.35

-------------------------------------------------------------------------------

Div2 
  Iterative	18.21		9.07		5.00		3.36
  Recursive	43.89		14.32		9.84		5.98
		(GC 56.48)	(GC 24.85)	(GC 12.85)	(GC 8.92)

-------------------------------------------------------------------------------

FFT		293.46		131.59		76.80		46.54
		(GC 240.56)	(GC 101.84)	(GC 53.84)	(GC 37.32)

-------------------------------------------------------------------------------

Fprint		18.30		6.08		3.94		2.32

-------------------------------------------------------------------------------

Fread		29.83		11.21		7.24		4.02

-------------------------------------------------------------------------------

FRPOLY	
  Power = 2
    (Fix)	0.17		0.06		0.03		0.02
    (Big)	0.17		0.06		0.04		0.02
    (Flo)	0.16		0.06		0.04		0.03

  Power = 5
    (Fix)	0.94		0.37		0.23		0.14
    (Big)	1.46		0.60		0.36		0.24
    (Flo)	1.29		0.48		0.30		0.19

  Power = 10
    (Fix & Big) 8.91		3.38		2.13		1.34
    (Big)	17.30		7.25		4.45		2.81
    (Flo)	12.80		4.69		2.89		1.77

  Power = 15
    (Fix & Big) 55.45		21.51		13.21		8.36
    (Big)	139.49		57.00		34.48		21.83
		(GC 122.14)	(GC 51.82)	(GC 26.99)	(GC 18.95)
    (Flo)	85.16		31.05		17.84		11.03
		(GC 61.52)	(GC 26.80)	(GC 13.78)	(GC 9.68)

-------------------------------------------------------------------------------

Puzzle
  array		512.56		231.79		128.92		85.44
  list		59.36		23.74		14.25		8.80

-------------------------------------------------------------------------------

Tak
  Tak		10.55		2.69		1.83		1.43
  Takf		27.05		10.43		6.53		4.62
  Takl		34.15		12.35		7.34		5.41
  Takr		15.63		4.40		3.42		2.09
  Ctak		34.86		13.86		8.09		5.77
  Stak		20.96		6.21		4.11		2.92

-------------------------------------------------------------------------------

Tprint		12.56		4.11		2.85		1.54

-------------------------------------------------------------------------------

Traverse
  Init		94.77		35.45		20.76		14.03
  Run		804.02		217.21		161.68		107.43

-------------------------------------------------------------------------------

Triang		2865.87		1021.35		660.78		452.82

-------------------------------------------------------------------------------

Wang		2.19		0.70		0.47		0.30

-------------------------------------------------------------------------------

Whetstone (KIPs)450.06 (120.37)	162.41 (381.10)	93.34 (618.43)	59.66 (992.06)
		(GC 234.38)	(GC 99.97)	(GC 52.96)	(GC 36.60)
FORTRAN V2			(400)
-------------------------------------------------------------------------------

Total 
  CPU		2:22:08		0:53:22		0:32:44		0:21:41
  Elapse	2:42:45		1:03:33		0:36:33		0:27:45
  Page faults	135748		174773		226934		280852
  Peak WS size	2048		2048		2518		832

∂26-Jun-84  1324	vanroggen%bach.DEC@decwrl.ARPA 	vax lisp vs. nil/franz 
Received: from DECWRL.ARPA by SU-AI.ARPA with TCP; 26 Jun 84  13:23:26 PDT
Received: from DEC-RHEA.ARPA by decwrl.ARPA (4.22.01/4.7.31)
	id AA01437; Tue, 26 Jun 84 13:23:33 pdt
Message-Id: <8406262023.AA01437@decwrl.ARPA>
Date: Tuesday, 26 Jun 1984 13:22:57-PDT
From: vanroggen%bach.DEC@decwrl.ARPA
To: rpg@su-ai, vanroggen%bach.DEC@decwrl.ARPA
Subject: vax lisp vs. nil/franz


Name of			VAXLisp		NIL		FRANZ Lisp
application		U1.0-20		259.11		Opus 38.79
Benchmark		VMS V3.5	VMS V3.5	Ultrix-32 FT2
			750 (No FPA)	750 (No FPA)	750 (No FPA)
---------------------------------------------------------------------------
---------------------------------------------------------------------------

Boyer			69.38 		234.06		35.37
			(GC 79.30)			(GC 125.85)

---------------------------------------------------------------------------

Browse			195.11		298.38		729.52
			(GC 164.05)			(GC 148.25)

---------------------------------------------------------------------------

Deriv			24.50		35.62		17.30
			(GC 49.63)			(GC 48.17)
	
---------------------------------------------------------------------------

Destru			11.30		23.99		8.96
							(GC 9.27)

---------------------------------------------------------------------------

Div2 
  Iterative		9.07		19.11		5.42
							(GC 25.13)
  Recursive		14.32		24.21		11.12
			(GC 24.85)			(GC 25.43)

---------------------------------------------------------------------------

FFT			131.59		77.42		330.45
			(GC 101.84)			(GC 42.35)

---------------------------------------------------------------------------

Fprint			6.08		39.93		1.17

---------------------------------------------------------------------------

Fread			11.21		28.75		2.00

---------------------------------------------------------------------------

FRPOLY	
  Power = 2
    r=x+y+z+1 (Fix)	0.06		0.06		0.03
    r2=1000*r (Big)	0.06		0.24		0.05
    r3=r (Flo)		0.06		0.06		0.03

  Power = 5
    r=x+y+z+1 (Fix)	0.37		0.49		0.35
    r2=1000*r (Big)	0.60		2.31		0.58 
							(GC 0.8)
    r3=r (Flo)		0.48		0.57		0.33
							(GC 0.8)

  Power = 10
    r=x+y+z+1 (Fix & Big) 3.38		5.1		3.40
							(GC 2.73)
    r2=1000*r (Big)	7.25		37.53		8.10
							(GC 8.0)
    r3=r (Flo)		4.69		5.73		3.51
							(GC 4.07)

  Power = 15
    r=x+y+z+1 (Fix & Big) 21.51		41.15		22.10
							(GC 7.12)
    r2=1000*r (Big)	57.00		444.53		67.50
			(GC 51.82)			(GC 51.8)
    r3=r (Flo)		31.05		35.15		22.42
			(GC 26.80)			(GC 12.50)

---------------------------------------------------------------------------

Puzzle
  array			231.79		606.48		305.8
							(GC 1.55)
  list			23.74		106.19		24.77
							(GC 0.9)

---------------------------------------------------------------------------

Tak
  Tak			2.69		8.35		2.1
  Takf-funcall		10.43		12.14		13.90
  Takl-list		12.35		59.14		11.52
  Takr-Tak0...Tak99	4.40		10.18		2.83
  Ctak-catch & throw	13.86		15.56		25.72
  Stak-special		6.21		30.69		11.67

---------------------------------------------------------------------------

Tprint			4.11		48.26		0.92

---------------------------------------------------------------------------

Traverse
  Init-traverse		35.45		156.28		40.70
							(GC 60.77)
  Run-traverse		217.21		719.18		259.02

---------------------------------------------------------------------------

Triang			1021.35		2682.47		3038.80
							(GC 10.08)

---------------------------------------------------------------------------

Wang			0.70		1.31		0.72

---------------------------------------------------------------------------

Whetstone (KIPs)	162.41 (381.10)	148.94 (402.9)	230.9 (259.63)
			(GC 99.97)			(GC 97.68)
FORTRAN V2		(400)
---------------------------------------------------------------------------

∂19-May-84  0122	GJC@MIT-MC
Received: from MIT-MC.ARPA by SU-AI.ARPA with TCP; 19 May 84  01:21:52 PDT
Date: 19 May 1984 04:22-EDT
From: George J. Carrette <GJC @ MIT-MC>
To: RPG @ SU-AI

I got a printout of 3600 timings from a copy of a letter sent to Masinter
which you sent to our west-coast marketing people. About the same time I
ran them on a "system-98" (lexical scoping, common-lisp version) machine
which was "full-function-PC-BOARD" (cache, blockmode bus transfers, 
100 nanosecond "source" 100 nanosecond "destination").
This isn't really full function though, in that new microinstructions and
data paths such as the arithmetic-masker-prom (ability to do arithmetic on
a fixed (burned in prom) subfield of the pointer, useful for fixnum
arithmetic and various pointer hacking), multiplier chip, read-dispatch
and memory-reference (good for CAR, CDR, AREF), have been tested but
not taken full advantage of in the microcode. Anyway, I have some interesting
results on some things. Timings are realtime, with disk wait taken out,
but without any funny without-interrupt kind of thing. (By the way,
these tests will be very nice for evaluating the cache and block-transfer
effect on the 2-processor lambda arrangement). The numbers given are
ratios, LAMBDA-time/3600-time. What is the best way to measure the time?

CTAK 0.62
DDERIV 1.12
DERIV  0.9
DESTRU 0.99
DIV2-1 0.56
DIV2-2 0.67

Those are the most vanilla lisp code examples. The worst result for
the LAMBDA was on FFT. Do you have a translation of that particular FFT
example in FORTRAN? I want to get an idea of how fast it should run,
on a 750 say, or 68000.

Needless to say, keep these numbers hush-hush for now, at least you will
have an idea of what you may be getting. -gjc

∂11-Jul-84  1750	EJG  	S-1 CTAK time 
The official S-1 CTAK time now stands at 0.820 seconds.  That's running
a (TESTER) doing (TAK 18 12 6) 100 times and dividing by 100.

By hacking the LAP file manually (see CTAK.LAP[1,EJG]) to convert the
10 PUSHes (bad pipe characteristics) and 2 moves for setting up a catch
frame into one MOVP.P.A and 10 moves, I got it down to 0.793 seconds.
This should be an easy change to CODEGE (but it only bought about 3 percent).

This compares to 0.6 on the Cray-1; our best yet!

∂12-Jul-84  1851	RPG  
Triang 124.7, 62.06 with separate record-answer function

∂17-Jul-84  0738	KESSLER@UTAH-20.ARPA 	Timing Report
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 17 Jul 84  07:38:00 PDT
Date: Tue 17 Jul 84 08:39:15-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: Timing Report
To: rpg@SU-AI.ARPA

Here are the specifics on the Apollo.  If they aren't sufficient, let me know what
other information you would like to see and I'll hack it up.  By the way, I'm
not planning on bringing any prepared slides or talk to the panel, is that a 
correct assumption?

Bob.
---------------------
One of the key characteristics of the Apollo systems is their 10 MB per
second ring network.  The network is completely transparent to applications
programs allowing among other things, a network wide file system.  It also
allows virtual memory operations to be performed over the network.  In a
typical environment there may be a number of nodes without disks and a few
nodes with attached disks used as file servers.

The timing tests were run on two versions of the Apollo Domain system.   The
characteristics of each system are:

DN300 - Has a Motorola 68010 processor (10 MHz Clock speed), 1.5 MB
        internal memory and no local disk.  Therefore virtual memory paging
	was performed on the network.

DN600 - Has dual Motorola 68000 processors (both are also 10 Mhz Clock
        speed, with one used to service page faults of the other
	processor), 2.0 MB internal memory, a PEB (performance enhancement
	board with hardware floating point) and a local disk.

The PSL features a fairly good interface to Apollo's operating system, and with
release 3.2, is now fairly robust.  A running PSL image occupies around 2.5
MB of virtual memory, so the paging overhead is non-trivial.  Otherwise,
it's a standard PSL system.
-------

∂17-Jul-84  0156	EJG  	New S-1 PUZZLE time
With the new CODEGEN special lookup code improvement for global variables,
PUZZLE takes 1.829 seconds.  (Interestingly enough, if an extra LET binding
is added to make three specials non-global, it takes 6.604 seconds!)

∂20-Jul-84  0807	KESSLER@UTAH-20.ARPA 	[jwa@lanl: combined times]  
Received: from UTAH-20.ARPA by SU-AI.ARPA with TCP; 20 Jul 84  08:07:48 PDT
Date: Fri 20 Jul 84 09:09:01-MDT
From: Robert R. Kessler <KESSLER@UTAH-20.ARPA>
Subject: [jwa@lanl: combined times]
To: rpg@SU-AI.ARPA

More timings.  Of primary note is that we've corrected the bug in IO that
was causing the fread and fprint to be horribly slow.  Once again your
tests help to spot problems with an implementation.  I hope that you make
that clear in your talk.  Finally, I think that we've got BIGNUM's finally
working, which will make fft and the middle frpoly's work.  If so,
I'll pass the numbers on.

Bob.
                ---------------

Mail-From: NOT-LOGGED-IN created at 19-Jul-84 10:55:05
Return-Path: <jwa@lanl>
Received: from lanl by UTAH-20.ARPA with TCP; Thu 19 Jul 84 10:55:07-MDT
Date: 19 Jul 1984 10:50:22-MDT
From: jwa@lanl
Received: by LANL.ARPA (4.12/4.7)
	id AA04638; Thu, 19 Jul 84 10:50:06 mdt
Date: Thu, 19 Jul 84 10:50:06 mdt
From: jwa@lanl (Wayne Anderson)
Message-Id: <8407191650.AA04638@LANL.ARPA>
To: kessler@utah-20
Subject: combined times

Bob,

The following is a table of the benchmark timings where New Psl means the 
latest version with Puzzle, etc. fixed.


	    | KL-10b |   20-60 | 20-60  | 3600   |  20-60  |  CRAY  |  NEW
Benchmark   |MACLISP |InterLisp| PSL 3.2| ZetaL  | PSL 3.3 | PSL 3.2| PSL 3.2
------------------------------------------------------------------------------
Boyer       |  6.47  |  25.458 | 11.74  |   12   |  11.87  | 1.853  | 1.905
Browse      | 13.64  |  70.321 | 23.82  |  32.1  |  23.06  | 4.677  | 4.678
Destruct    |  2.16  |   9.206 | 2.381  |  3.94  |   1.85  | 0.451  | 0.446
Traverse    |        |         |        |        |         |        |
 Initialize |  6.69  |  37.62  |  7.596 |  12.3  |  6.368  |        |
 Traverse   | 23.96  | 85.862  | 43.226 | 51.23  | 34.381  |        |
Tak         | 0.489  |  2.088  | 0.4662 |  0.59  |   .478  | 0.045  | 0.044
STak        |  3.5   |  6.379  | 2.682  |  2.48  |  2.685  | 1.112  | 1.113
CTak        |  2.85  | 44.672  | 2.958  |  8.84  |  2.995  | 0.606  | 0.593
Takl        |  2.81  |  3.786  | 2.521  |  6.32  |  2.533  | 0.307  | 0.308
Takr        | 0.488  |  2.162  | 0.609  |  0.59  |  0.61   | 0.069  | 0.068
Deriv       |  1.81  |  40.21  | 5.384  | 11.55  |  5.49   | 1.280  | 1.308
DDeriv      |  2.83  |  28.067 |  6.04  |  14.6  |  6.61   | 1.422  | 1.444
Fdderiv     |  2.15  |    -    |   -    |   -    |         |        |
Div2  
 Iterative  | 0.844  | 131.858 |   2.3  |  4.8   |  2.205  | 0.581  | 0.582
 Recursive  |  1.28  |  68.208 |   2.34 |  6.27  |  2.246  | 0.575  | 0.608
FFT         |  4.0   |   12.6  | 35.517 |  4.77  | 37.9    |
Puzzle      |  7.87  | 121.028 | 15.92  | 14.21  |  4.285  | 1.007  | 1.010
Triang      | 86.03  |2326.439 | 86.574 | 158.1  | 78.347  | 14.54  | 14.44
Fprint      |  0.78  |  4.451  | 4.665  |  3.1   | 4.636   | 3.618  | 0.378
Fread       |  0.98  |  4.476  | 5.725  |  6.25  | 5.131   | 20.41  | 0.632
Tprint      |  0.81  |  4.727  | 4.351  |  7.8   | 4.083   | 0.190  | 0.195
Frpoly, power = 2    
 r=x+y+z+1  | 5.0E-3 |    -    | 0.023  | 9.0E-3 | 0.021   | 1.3E-3 | 1.3E-3
 r2=1000*r  | 5.0E-3 |    -    | 0.039  | 0.017  | 0.044   |        | 1.3E-3?
 r3=r,flonum| 4.0E-3 |    -    | 0.025  | 9.0E-3 | 0.025   | 1.9E-3 | 1.9E-3
Frpoly, power = 5    
 r=x+y+z+1  | 0.042  |    -    | 0.093  |  0.88  | 0.108   | 0.013  | 0.013
 r2=1000*r  | 0.067  |    -    | 0.425  |  0.26  | 0.358   |        | 0.009?
 r3=r,flonum| 0.042  |    -    | 0.136  | 0.094  | 0.166   | 0.020  | 0.020
Frpoly, power = 10   
 r=x+y+z+1  | 0.467  |    -    | 0.921  |  1.01  | 0.897   | 0.142  | 0.143
 r2=1000*r  | 0.926  |    -    | 6.46   |  5.1   | 6.199   |        | 0.033?
 r3=r,flonum|  0.47  |    -    | 1.509  |  1.07  | 1.603   | 0.229  | 0.229
Frpoly, power = 15   
 r=x+y+z+1  |  3.15  |    -    | 12.68  |  6.4   | 12.268  | 0.942  | 0.961
 r2=1000*r  |  9.43  |    -    | 68.195 |  50.3  | 67.636  |        | 0.005?
 r3=r,flonum|  3.16  |    -    | 11.138 |  6.8   | 11.514  | 1.606  | 1.639
--------------------------------------------------------------------


HP-20 is latest PSL 3.3, extended addressing.
SAIL is MacLisp on a dec10 (kl10b)
interlisp on a 2060
Symbolic 3600 with a lot of memory.
CRAY is PSL 3.2, LANL on Cray XMP
-------

---------------------


Wayne
-------

∂25-Jul-84  1804	GRISS%hp-hulk.csnet@csnet-relay.arpa 	Our uptodate table    
Received: from CSNET-PDN-GW by SU-AI.ARPA with TCP; 25 Jul 84  18:04:21 PDT
Received: From hp-labs.csnet by csnet-relay;  25 Jul 84 20:08 EDT
Received: by HP-VENUS id AA04993; Wed, 25 Jul 84 16:46:34 pdt
Message-Id: <8407252346.AA04993@HP-VENUS>
Date: Wed 25 Jul 84 16:47:20-PDT
From: Martin <GRISS%hp-labs.csnet@csnet-relay.arpa>
Subject: Our uptodate table
To: rpg@su-ai.arpa
Cc: GRISS@csnet-relay.arpa
Source-Info:  From (or Sender) name not authenticated.

This has some CRAY and HP times, both pascal and Unix.  Do you need
more detail. Also, I passed the benchmarks to Ager and McDonald, hope
they will try on 370.


                          GABRIEL BENCHMARKS

RPG: Wed 28 Mar 84 07:19:00-MST
RRK: 20-Jul-84 09:09 MDT CRAY
MLG: 4:04pm  Saturday, 28 April 1984
BS : 24-Jul-84 10:57:52

          |KL-10b|  20-60 | 20-60 | 3600 | 20-60 | CRAY | 12Mhz| HP-UX
Benchmark |MACLSP|InterLsp|PSL 3.2| ZetaL|PSL 3.3|PSL3.2| HP200|PSL3.3
--------------------------------------------------------------------------
Boyer     |  6.47|  25.458| 11.74 | 12   |  11.87| 1.905| 25.52| 25.19
Browse    | 13.64|  70.321| 23.82 | 32.1 |  23.06| 4.678| 39.84| 42.99
Destruct  |  2.16|   9.206|  2.381|  3.94|   1.85| 0.466|  4.25|  4.69
Traverse 
 Init     |  6.69|  37.62 | 7.596 | 12.3 |  6.368|      | 13.54| 14.89
 Traverse | 23.96| 85.862 |43.226 | 51.23| 34.381|	|108.69|102.36
Tak       | 0.489|  2.088 | 0.4662|  0.59|   .478| 0.044|  1.53|  1.51
STak      |  3.5 |  6.379 | 2.682 |  2.48|  2.685| 1.113| 11.71| 12.51
CTak      |  2.85| 44.672 | 2.958 |  8.84|  2.995| 0.593|  9.33|  9.49
Takl      |  2.81|  3.786 | 2.521 |  6.32|  2.533| 0.308|  5.73|  6.39
Takr      | 0.488|  2.162 | 0.609 |  0.59|  0.61 | 0.068|  1.76|  1.55
Deriv     |  1.81|  40.21 | 5.384 | 11.55|  5.49 | 1.308| 14.82| 15.66
DDeriv    |  2.83|  28.067| 6.04  | 14.6 |  6.61 | 1.444| 16.03| 16.78
Fdderiv   |  2.15|    -   |   -   |   -  |   -   |   -  |   -  |
Div2
 Iterative| 0.844| 131.858|  2.3  |  4.8 |  2.205| 0.582|  6.34|  6.58
 Recursive|  1.28|  68.208|  2.34 |  6.27|  2.246| 0.608|  5.87|  5.95
FFT       |  4.0 |   12.6 | 35.517|  4.77| 37.9  |      |132.94|131.10
Puzzle    |  7.87| 121.028| 15.92 | 14.21|  4.285| 1.010| 10.85| 12.48
Triang    | 86.03|2326.439| 86.574|158.1 | 78.347|14.44 |261.07|250.07
Fprint    |  0.78|  4.451 |  4.665|  3.1 | 4.636 | 0.378|  9.37|  3.40
Fread     |  0.98|  4.476 |  5.725|  6.25| 5.131 | 0.632|  5.16|  5.03
Tprint    |  0.81|  4.727 |  4.351|  7.8 | 4.083 | 0.195| 11.19|  7.82
Frpoly, power = 2    
 r=x+y+z+1|5.0E-3|    -   | 0.023 |9.0E-3| 0.021 |1.3E-3|  0.07|  0.06
 r2=1000*r|5.0E-3|    -   | 0.039 |0.017 | 0.044 |1.3E-3|  0.15|  0.15
 r3=r,flnm|4.0E-3|    -   | 0.025 |9.0E-3| 0.025 |1.9E-3|  0.09|  0.14
Frpoly, power = 5    
 r=x+y+z+1| 0.042|    -   | 0.093 | 0.88 | 0.108 | 0.013|  0.25|  0.24
 r2=1000*r| 0.067|    -   | 0.425 | 0.26 | 0.358 | .009?|  1.34|  1.60
 r3=r,flnm| 0.042|    -   | 0.136 | 0.094| 0.166 | 0.020|  0.41|  0.41
Frpoly, power = 10   
 r=x+y+z+1| 0.467|    -   | 0.921 |  1.01| 0.897 | 0.143|  2.24|  2.45
 r2=1000*r| 0.926|    -   | 6.46  |  5.1 | 6.199 | .033?| 26.06| 30.52
 r3=r,flnm|  0.47|    -   | 1.509 |  1.07| 1.603 | 0.228|  4.28|  4.73
Frpoly, power = 15   
 r=x+y+z+1|  3.15|    -   | 12.68 |  6.4 | 12.268| 0.936| 41.75| 47.19
 r2=1000*r|  9.43|    -   | 68.195| 50.3 | 67.636| .005?|406.15|480.73
 r3=r,flnm|  3.16|    -   | 11.138|  6.8 | 11.514| 1.594| 30.40| 33.73
---------------------------------------------------------------------------


HP-20 is latest PSL 3.3, extended addressing.
SAIL is MacLisp on a dec10 (kl10b)
interlisp on a 2060
Symbolic 3600 with a lot of memory.
CRAY is PSL 3.2, LANL on Cray XMP.
HP200 is PSL 3.3, on 68000 with 12Mhz clock and cache.
HP-UX is single user HP200, PSL 3.3, on 68000 with 12Mhz clock and cache.
-------

∂27-Jul-84  1151	JIM@CMU-CS-C.ARPA 	Perq Lisp bench marks
Received: from CMU-CS-C.ARPA by SU-AI.ARPA with TCP; 27 Jul 84  11:50:47 PDT
Received: ID <JIM@CMU-CS-C.ARPA>; Fri 27 Jul 84 14:21:03-EDT
Date: Fri 27 Jul 84 13:36:03-EDT
From: James.Muller@CMU-CS-C.ARPA
Subject: Perq Lisp bench marks
To: rpg@SU-AI.ARPA

I have rerun all of the Lisp bench marks in our new Lisp.  Skef Wholey has
also rerun some of them, getting similar times.  An overall speedup of 26%
was gained over your previous times.  Puzzle has sped up by a factor of over
17.  Here is a list of the best times from the new sets of timings we have
taken.

Benchmark       % of old time   Time    Machine used

Boyer           82%             159.80  Skef's T1
Browse          88%             439.98  Joe's T2
Destruct        85%              22.15  Joe's T2
Traverse Init   87%              71.30  Skef's T1
Traverse        77%             491.42  Jim's T0
Tak             83%               4.70  Skef's T1
STak            63%              13.03  Skef's T1
CTak            68%               8.48  Joe's T2
Takl            74%              26.98  Joe's T2
Takr            74%               7.52  Joe's T2
Deriv           78%             104.75  Joe's T2
DDeriv          90%             136.33  Joe's T2
Div2 Iterative  61%              38.43  Jim's T0
Div2 Recursive  77%              59.26  Joe's T2
FFT             92%              86.58  Joe's T2
Puzzle           5.74%          104.07  Joe's T2
Triangle       100%            1844.32  Joe's T2
Fprint                           16.38  RPG's T2
Fread                            23.07  RPG's T2
Tprint          54%              29.08  Joe's T2

Problems occurred with Fprint and Fread.  The best times I was able to get
for them were 25.05 and 25.38 seconds, respectively.  Your superior
times appear above.  It seems almost certain that you committed a clerical
error while recording the time for Fprint, and possible that your time for
Fread is in error.

These timings were run on a modified version of the file that you sent to us.
Some of the conversions to Common Lisp in that file were bogus, and some of
them weren't even real Common Lisp.  I believe that Rob MacLachlan made
these conversions.  The new files appear below (the version of puzzle at
the end is the working one).

-- Jim


;;;; -*- Mode: Lisp; Package: User -*-

;;;; This file contains the common lisp version of the lisp performance benchmarks from Stanford.
;;;; These were translated and tested using Symbolics Common Lisp on a Symbolics
;;;; 3600.  The benchmarks in this file have not been "tuned" to any particular
;;;; implementation.  There is no Common Lisp timing function as these are 
;;;; highly system dependent.

;;;; Suggestions or problems to PW@SAIL.

;;; Clear out any leftover property-list entries (e.g. stale special
;;; declarations) on the benchmark's variable symbols before they are used.
(eval-when (compile load eval)
  (dolist (sym '(x y z))
    (setf (symbol-plist sym) nil)))

;;; BOYER -- Logic programming benchmark, originally written by Bob Boyer.
;;; Fairly CONS intensive.

;;; Global scratch variables shared by the rewriter below.
(defvar unify-subst)            ; substitution built by ONE-WAY-UNIFY/ONE-WAY-UNIFY1
(defvar temp-temp)              ; holds the most recent ASSQ lookup result

;;; Install one rewrite rule.  TERM must have the shape
;;; (EQUAL (fn . args) rhs); the whole term is pushed onto the LEMMAS
;;; property of FN.  Any other shape signals an error.
(defun add-lemma (term)
  (cond ((and (not (atom term))
	      (eq (car term)
		  (quote equal))
	      (not (atom (cadr term))))
	 (setf (get (car (cadr term)) (quote lemmas))
	       (cons term (get (car (cadr term)) (quote lemmas)))))
	(t (error "~%ADD-LEMMA did not like term:  ~a" term))))

;;; Install every lemma in LST via ADD-LEMMA.  Returns T.
(defun add-lemma-lst (lst)
  (cond ((null lst)
	 t)
	(t (add-lemma (car lst))
	   (add-lemma-lst (cdr lst)))))

;;; Apply substitution ALIST to TERM.  An atomic term is replaced by its
;;; binding in ALIST when one exists; a compound term keeps its operator
;;; and has the substitution mapped over its arguments.  Stores the ASSQ
;;; result in the global TEMP-TEMP.  NOTE(review): ASSQ is not ANSI
;;; Common Lisp -- presumably supplied by the host system; confirm.
(defun apply-subst (alist term)
  (cond ((atom term)
	 (cond ((setq temp-temp (assq term alist))
		(cdr temp-temp))
	       (t term)))
	(t (cons (car term)
		 (apply-subst-lst alist (cdr term))))))

;;; Map APPLY-SUBST over LST, returning a fresh list of substituted terms.
(defun apply-subst-lst (alist lst)
  (cond ((null lst)
	 nil)
	(t (cons (apply-subst alist (car lst))
		 (apply-subst-lst alist (cdr lst))))))

;;; True when X is the literal false value (F) or is already assumed
;;; false, i.e. appears in LST.
(defun falsep (x lst)
  (or (equal x (quote (f)))
      (member x lst)))

;;; Top-level one-way matcher: reset the global UNIFY-SUBST and try to
;;; match pattern TERM2 against TERM1 via ONE-WAY-UNIFY1.
(defun one-way-unify (term1 term2)
  (progn (setq unify-subst nil)
	 (one-way-unify1 term1 term2)))

;;; Match pattern TERM2 against TERM1, accumulating bindings in the
;;; global UNIFY-SUBST.  Atoms in TERM2 act as variables: a bound one
;;; must EQUAL its existing binding, an unbound one gets bound to TERM1.
;;; Compound patterns require an EQ operator and matching argument lists.
;;; Uses the global TEMP-TEMP for the ASSQ result.
(defun one-way-unify1 (term1 term2)
  (cond ((atom term2)
	 (cond ((setq temp-temp (assq term2 unify-subst))
		(equal term1 (cdr temp-temp)))
	       (t (setq unify-subst (cons (cons term2 term1)
					  unify-subst))
		  t)))
	((atom term1)
	 nil)
	((eq (car term1)
	     (car term2))
	 (one-way-unify1-lst (cdr term1)
			     (cdr term2)))
	(t nil)))

;;; Match the argument lists LST1 and LST2 element-wise, extending
;;; UNIFY-SUBST as bindings are made.  T only if every pair matches.
(defun one-way-unify1-lst (lst1 lst2)
  (cond ((null lst1)
	 t)
	((one-way-unify1 (car lst1)
			 (car lst2))
	 (one-way-unify1-lst (cdr lst1)
			     (cdr lst2)))
	(t nil)))

;;; Normalize TERM bottom-up: rewrite its arguments first, then apply
;;; the lemmas stored on the operator's LEMMAS property (if any) via
;;; REWRITE-WITH-LEMMAS.  Atoms rewrite to themselves.
(defun rewrite (term)
  (cond ((atom term)
	 term)
	(t (rewrite-with-lemmas (cons (car term)
				      (rewrite-args (cdr term)))
				(get (car term)
				     (quote lemmas))))))

;;; Rewrite every element of LST, returning a fresh list of the results.
(defun rewrite-args (lst)
  (cond ((null lst)
	 nil)
	(t (cons (rewrite (car lst))
		 (rewrite-args (cdr lst))))))

;;; Try each lemma in LST against TERM.  A lemma (EQUAL lhs rhs) applies
;;; when TERM one-way-unifies with its lhs (the CADR); the rhs (the
;;; CADDR) instantiated with UNIFY-SUBST is then rewritten further.
;;; When no lemma applies, TERM is returned unchanged.
(defun rewrite-with-lemmas (term lst)
  (cond ((null lst)
	 term)
	((one-way-unify term (cadr (car lst)))
	 (rewrite (apply-subst unify-subst (caddr (car lst)))))
	(t (rewrite-with-lemmas term (cdr lst)))))

;;; Install the Boyer benchmark's fixed lemma database.  Each entry in
;;; the quoted list is a rewrite rule (EQUAL lhs rhs) stored under the
;;; LEMMAS property of the lhs operator by ADD-LEMMA-LST.  This data is
;;; part of the benchmark definition and must not be altered.
(defun setup ()
  (add-lemma-lst
    (quote ((equal (compile form)
		   (reverse (codegen (optimize form)
				     (nil))))
	    (equal (eqp x y)
		   (equal (fix x)
			  (fix y)))
	    (equal (greaterp x y)
		   (lessp y x))
	    (equal (lesseqp x y)
		   (not (lessp y x)))
	    (equal (greatereqp x y)
		   (not (lessp x y)))
	    (equal (boolean x)
		   (or (equal x (t))
		       (equal x (f))))
	    (equal (iff x y)
		   (and (implies x y)
			(implies y x)))
	    (equal (even1 x)
		   (if (zerop x)
		       (t)
		       (odd (1- x))))
	    (equal (countps- l pred)
		   (countps-loop l pred (zero)))
	    (equal (fact- i)
		   (fact-loop i 1))
	    (equal (reverse- x)
		   (reverse-loop x (nil)))
	    (equal (divides x y)
		   (zerop (remainder y x)))
	    (equal (assume-true var alist)
		   (cons (cons var (t))
			 alist))
	    (equal (assume-false var alist)
		   (cons (cons var (f))
			 alist))
	    (equal (tautology-checker x)
		   (tautologyp (normalize x)
			       (nil)))
	    (equal (falsify x)
		   (falsify1 (normalize x)
			     (nil)))
	    (equal (prime x)
		   (and (not (zerop x))
			(not (equal x (add1 (zero))))
			(prime1 x (1- x))))
	    (equal (and p q)
		   (if p (if q (t)
			     (f))
		       (f)))
	    (equal (or p q)
		   (if p (t)
		       (if q (t)
			   (f))
		       (f)))
	    (equal (not p)
		   (if p (f)
		       (t)))
	    (equal (implies p q)
		   (if p (if q (t)
			     (f))
		       (t)))
	    (equal (fix x)
		   (if (numberp x)
		       x
		       (zero)))
	    (equal (if (if a b c)
		       d e)
		   (if a (if b d e)
		       (if c d e)))
	    (equal (zerop x)
		   (or (equal x (zero))
		       (not (numberp x))))
	    (equal (plus (plus x y)
			 z)
		   (plus x (plus y z)))
	    (equal (equal (plus a b)
			  (zero))
		   (and (zerop a)
			(zerop b)))
	    (equal (difference x x)
		   (zero))
	    (equal (equal (plus a b)
			  (plus a c))
		   (equal (fix b)
			  (fix c)))
	    (equal (equal (zero)
			  (difference x y))
		   (not (lessp y x)))
	    (equal (equal x (difference x y))
		   (and (numberp x)
			(or (equal x (zero))
			    (zerop y))))
	    (equal (meaning (plus-tree (append x y))
			    a)
		   (plus (meaning (plus-tree x)
				  a)
			 (meaning (plus-tree y)
				  a)))
	    (equal (meaning (plus-tree (plus-fringe x))
			    a)
		   (fix (meaning x a)))
	    (equal (append (append x y)
			   z)
		   (append x (append y z)))
	    (equal (reverse (append a b))
		   (append (reverse b)
			   (reverse a)))
	    (equal (times x (plus y z))
		   (plus (times x y)
			 (times x z)))
	    (equal (times (times x y)
			  z)
		   (times x (times y z)))
	    (equal (equal (times x y)
			  (zero))
		   (or (zerop x)
		       (zerop y)))
	    (equal (exec (append x y)
			 pds envrn)
		   (exec y (exec x pds envrn)
			 envrn))
	    (equal (mc-flatten x y)
		   (append (flatten x)
			   y))
	    (equal (member x (append a b))
		   (or (member x a)
		       (member x b)))
	    (equal (member x (reverse y))
		   (member x y))
	    (equal (length (reverse x))
		   (length x))
	    (equal (member a (intersect b c))
		   (and (member a b)
			(member a c)))
	    (equal (nth (zero)
			i)
		   (zero))
	    (equal (exp i (plus j k))
		   (times (exp i j)
			  (exp i k)))
	    (equal (exp i (times j k))
		   (exp (exp i j)
			k))
	    (equal (reverse-loop x y)
		   (append (reverse x)
			   y))
	    (equal (reverse-loop x (nil))
		   (reverse x))
	    (equal (count-list z (sort-lp x y))
		   (plus (count-list z x)
			 (count-list z y)))
	    (equal (equal (append a b)
			  (append a c))
		   (equal b c))
	    (equal (plus (remainder x y)
			 (times y (quotient x y)))
		   (fix x))
	    (equal (power-eval (big-plus1 l i base)
			       base)
		   (plus (power-eval l base)
			 i))
	    (equal (power-eval (big-plus x y i base)
			       base)
		   (plus i (plus (power-eval x base)
				 (power-eval y base))))
	    (equal (remainder y 1)
		   (zero))
	    (equal (lessp (remainder x y)
			  y)
		   (not (zerop y)))
	    (equal (remainder x x)
		   (zero))
	    (equal (lessp (quotient i j)
			  i)
		   (and (not (zerop i))
			(or (zerop j)
			    (not (equal j 1)))))
	    (equal (lessp (remainder x y)
			  x)
		   (and (not (zerop y))
			(not (zerop x))
			(not (lessp x y))))
	    (equal (power-eval (power-rep i base)
			       base)
		   (fix i))
	    (equal (power-eval (big-plus (power-rep i base)
					 (power-rep j base)
					 (zero)
					 base)
			       base)
		   (plus i j))
	    (equal (gcd x y)
		   (gcd y x))
	    (equal (nth (append a b)
			i)
		   (append (nth a i)
			   (nth b (difference i (length a)))))
	    (equal (difference (plus x y)
			       x)
		   (fix y))
	    (equal (difference (plus y x)
			       x)
		   (fix y))
	    (equal (difference (plus x y)
			       (plus x z))
		   (difference y z))
	    (equal (times x (difference c w))
		   (difference (times c x)
			       (times w x)))
	    (equal (remainder (times x z)
			      z)
		   (zero))
	    (equal (difference (plus b (plus a c))
			       a)
		   (plus b c))
	    (equal (difference (add1 (plus y z))
			       z)
		   (add1 y))
	    (equal (lessp (plus x y)
			  (plus x z))
		   (lessp y z))
	    (equal (lessp (times x z)
			  (times y z))
		   (and (not (zerop z))
			(lessp x y)))
	    (equal (lessp y (plus x y))
		   (not (zerop x)))
	    (equal (gcd (times x z)
			(times y z))
		   (times z (gcd x y)))
	    (equal (value (normalize x)
			  a)
		   (value x a))
	    (equal (equal (flatten x)
			  (cons y (nil)))
		   (and (nlistp x)
			(equal x y)))
	    (equal (listp (gopher x))
		   (listp x))
	    (equal (samefringe x y)
		   (equal (flatten x)
			  (flatten y)))
	    (equal (equal (greatest-factor x y)
			  (zero))
		   (and (or (zerop y)
			    (equal y 1))
			(equal x (zero))))
	    (equal (equal (greatest-factor x y)
			  1)
		   (equal x 1))
	    (equal (numberp (greatest-factor x y))
		   (not (and (or (zerop y)
				 (equal y 1))
			     (not (numberp x)))))
	    (equal (times-list (append x y))
		   (times (times-list x)
			  (times-list y)))
	    (equal (prime-list (append x y))
		   (and (prime-list x)
			(prime-list y)))
	    (equal (equal z (times w z))
		   (and (numberp z)
			(or (equal z (zero))
			    (equal w 1))))
	    (equal (greatereqpr x y)
		   (not (lessp x y)))
	    (equal (equal x (times x y))
		   (or (equal x (zero))
		       (and (numberp x)
			    (equal y 1))))
	    (equal (remainder (times y x)
			      y)
		   (zero))
	    (equal (equal (times a b)
			  1)
		   (and (not (equal a (zero)))
			(not (equal b (zero)))
			(numberp a)
			(numberp b)
			(equal (1- a)
			       (zero))
			(equal (1- b)
			       (zero))))
	    (equal (lessp (length (delete x l))
			  (length l))
		   (member x l))
	    (equal (sort2 (delete x l))
		   (delete x (sort2 l)))
	    (equal (dsort x)
		   (sort2 x))
	    (equal (length (cons x1
				 (cons x2
				       (cons x3 (cons x4
						      (cons x5
							    (cons x6 x7)))))))
		   (plus 6 (length x7)))
	    (equal (difference (add1 (add1 x))
			       2)
		   (fix x))
	    (equal (quotient (plus x (plus x y))
			     2)
		   (plus x (quotient y 2)))
	    (equal (sigma (zero)
			  i)
		   (quotient (times i (add1 i))
			     2))
	    (equal (plus x (add1 y))
		   (if (numberp y)
		       (add1 (plus x y))
		       (add1 x)))
	    (equal (equal (difference x y)
			  (difference z y))
		   (if (lessp x y)
		       (not (lessp y z))
		       (if (lessp z y)
			   (not (lessp y x))
			   (equal (fix x)
				  (fix z)))))
	    (equal (meaning (plus-tree (delete x y))
			    a)
		   (if (member x y)
		       (difference (meaning (plus-tree y)
					    a)
				   (meaning x a))
		       (meaning (plus-tree y)
				a)))
	    (equal (times x (add1 y))
		   (if (numberp y)
		       (plus x (times x y))
		       (fix x)))
	    (equal (nth (nil)
			i)
		   (if (zerop i)
		       (nil)
		       (zero)))
	    (equal (last (append a b))
		   (if (listp b)
		       (last b)
		       (if (listp a)
			   (cons (car (last a))
				 b)
			   b)))
	    (equal (equal (lessp x y)
			  z)
		   (if (lessp x y)
		       (equal t z)
		       (equal f z)))
	    (equal (assignment x (append a b))
		   (if (assignedp x a)
		       (assignment x a)
		       (assignment x b)))
	    (equal (car (gopher x))
		   (if (listp x)
		       (car (flatten x))
		       (zero)))
	    (equal (flatten (cdr (gopher x)))
		   (if (listp x)
		       (cdr (flatten x))
		       (cons (zero)
			     (nil))))
	    (equal (quotient (times y x)
			     y)
		   (if (zerop y)
		       (zero)
		       (fix x)))
	    (equal (get j (set i val mem))
		   (if (eqp j i)
		       val
		       (get j mem)))))))

;;; Decide whether X is a tautology given the lists of terms assumed
;;; true (TRUE-LST) and assumed false (FALSE-LST).  Only IF forms are
;;; analyzed; any other non-trivial term fails.
(defun tautologyp (x true-lst false-lst)
  (if (truep x true-lst)
      t
      (if (or (falsep x false-lst) (atom x))
	  nil
	  (if (eq (car x) 'if)
	      (let ((test (cadr x))
		    (then (caddr x))
		    (else (cadddr x)))
		(cond ((truep test true-lst)
		       (tautologyp then true-lst false-lst))
		      ((falsep test false-lst)
		       (tautologyp else true-lst false-lst))
		      (t
		       ;; split: assume TEST true for the THEN branch and
		       ;; false for the ELSE branch
		       (and (tautologyp then (cons test true-lst) false-lst)
			    (tautologyp else true-lst (cons test false-lst))))))
	      nil))))

;;; Rewrite X to normal form, then test it for tautology with empty
;;; assumption lists.
(defun tautp (x)
  (let ((normal-form (rewrite x)))
    (tautologyp normal-form nil nil)))

;;; Top-level driver for the BOYER benchmark: substitute concrete
;;; tagged terms for the variables of an IMPLIES chain, then run the
;;; rewriting tautology checker on the result.
;;; NOTE(review): ANS is also DEFVARed later in this file, so the PROG
;;; binding of ANS may be a special binding -- kept byte-identical.
(defun test ()
  (prog (ans term)
	(setq term
	      ;; each entry is (variable tag . replacement-term)
	      (apply-subst
		(quote ((x f (plus (plus a b)
				   (plus c (zero))))
			(y f (times (times a b)
				    (plus c d)))
			(z f (reverse (append (append a b)
					      (nil))))
			(u equal (plus a b)
			   (difference x y))
			(w lessp (remainder a b)
			   (member a (length b)))))
		(quote (implies (and (implies x y)
				     (and (implies y z)
					  (and (implies z u)
					       (implies u w))))
				(implies x w)))))
	(setq ans (tautp term))))

;;; Build the term (IMPLIES <chain of N links> (IMPLIES 0 N)).
(defun trans-of-implies (n)
  (list 'implies
	(trans-of-implies1 n)
	(list 'implies 0 n)))

;;; Build (AND (IMPLIES n-1 n) ... ) down to the single base link
;;; (IMPLIES 0 1), by recursion on N.
(defun trans-of-implies1 (n)
  (if (equal n 1)			; I think (eql n 1) may work here
      (list 'implies 0 1)
      (list 'and
	    (list 'implies (1- n) n)
	    (trans-of-implies1 (1- n)))))

;;; A term is "true" if it is literally (T) or appears in LST.
;;; Returns a generalized boolean (the MEMBER tail in the second case).
(defun truep (x lst)
  (or (equal x '(t))
      (member x lst)))

;;; Install the rewrite lemmas at load time so (TEST) can run at once.
(eval-when (load eval)
  (setup))

;;; make sure you've run (setup) then call:  (test)


!;;; BROWSE -- Benchmark to create and browse through an AI-like data base of units.

;;; n is # of symbols
;;; m is maximum amount of stuff on the plist
;;; npats is the number of basic patterns on the unit
;;; ipats is the instantiated copies of the patterns

(defvar rand 21.)	; seed/state of BROWSE-RANDOM's congruential generator

;; First character of a symbol's print name; used by MATCH to detect
;; ? and * pattern variables.
(defmacro char1 (x) `(aref (string ,x) 0))	; maybe SYMBOL-NAME

;;; Build the BROWSE data base: N fresh symbols, each with M property
;;; cells, NPATS of the IPATS patterns stored under PATTERN, and the
;;; remaining cells as gensym-named filler.  Returns the symbol list.
(defun init (n m npats ipats)
  (let ((ipats (copy-tree ipats)))
    ;; make the pattern list circular so patterns can be drawn cyclically
    (do ((p ipats (cdr p)))
	((null (cdr p)) (rplacd p ipats)))	
    (do ((n n (1- n))
	 ;; I cycles M-1 ... 0 so the PATTERN property lands at a
	 ;; different plist position on each symbol
	 (i m (cond ((= i 0) m)
		    (t (1- i))))
	 (name (intern (gensym)) (intern (gensym)))
	 (a ()))
	((= n 0) a)
      (push name a)
      ;; I filler properties before the pattern property...
      (do ((i i (1- i)))
	  ((= i 0))
	(setf (get name (gensym)) nil))
      (setf (get name 'pattern)
	    (do ((i npats (1- i))
		 (ipats ipats (cdr ipats))
		 (a ()))
		((= i 0) a)
	      (push (car ipats) a)))
      ;; ...and M-I filler properties after it
      (do ((j (- m i) (1- j)))
	  ((= j 0))
	(setf (get name (gensym)) nil)))))  


;;; Linear congruential step: advance the special variable RAND and
;;; return its new value (in 0..250).
(defun browse-random ()
  (let ((next (mod (* 17. rand) 251.)))
    (setq rand next)))

;;; Destructively shuffle list L using BROWSE-RANDOM: repeatedly pick
;;; a position mod the remaining length, splice that element out, and
;;; push it onto the result.  L is consumed in the process.
(defun randomize (l)
  (do ((a ()))
      ((null l) a)
    (let ((n (mod (browse-random) (length l))))
      (cond ((= n 0)
	     ;; position 0: take the head directly
	     (push (car l) a)
	     (setq l (cdr l)))
	    (t 
	     ;; walk to the cell just before position N, then splice
	     ;; out its successor with RPLACD
	     (do ((n n (1- n))
		  (x l (cdr x)))
		 ((= n 1)
		  (push (cadr x) a)
		  (rplacd x (cddr x)))))))))


;;; Match pattern PAT against datum DAT under binding list ALIST.
;;; The literal symbols ? and * match one / any-number-of elements;
;;; atoms whose names begin with ? or * are named variables with the
;;; same arities, bound in ALIST on first use.
(defun match (pat dat alist)
  (cond ((null pat)
	 (null dat))
	((null dat) ())
	((or (eq (car pat) '?)
	     (eq (car pat)
		 (car dat)))
	 (match (cdr pat) (cdr dat) alist))
	((eq (car pat) '*)
	 ;; anonymous *: match zero elements, one element, or extend
	 (or (match (cdr pat) dat alist)
	     (match (cdr pat) (cdr dat) alist)
	     (match pat (cdr dat) alist)))
	(t (cond ((atom (car pat))
		  (cond ((eq (char1 (car pat)) #\?)
			 ;; named ?: if bound, match its value; else
			 ;; bind it to the current element
			 (let ((val (assoc (car pat) alist)))
			   (cond (val (match (cons (cdr val)
						   (cdr pat))
					     dat alist))
				 (t (match (cdr pat)
					   (cdr dat)
					   (cons (cons (car pat)
						       (car dat))
						 alist))))))
			((eq (char1 (car pat)) #\*)
			 ;; named *: if bound, splice its value in
			 ;; front; else try successively longer segments
			 (let ((val (assoc (car pat) alist)))
			   (cond (val (match (append (cdr val)
						     (cdr pat))
					     dat alist))
				 (t 
				  (do ((l () (nconc l (cons (car d) nil)))
				       (e (cons () dat) (cdr e))
				       (d dat (cdr d)))
				      ((null e) ())
				    (cond ((match (cdr pat) d
						  (cons (cons (car pat) l)
							alist))
					   (return t))))))))))
		 ;; sub-pattern: recurse into both CARs, then the CDRs
		 (t (and 
		      (not (atom (car dat)))
		      (match (car pat)
			     (car dat) alist)
		      (match (cdr pat)
			     (cdr dat) alist)))))))

;;; Top level of BROWSE: build a 100-unit data base, shuffle it, and
;;; match every unit's stored patterns against three query patterns.
(defun browse ()
  (let* ((data-base (init 100. 10. 4.
			  '((a a a b b b b a a a a a b b a a a)
			    (a a b b b b a a (a a) (b b))
			    (a a a b (b a) b a b a))))
	 (units (randomize data-base)))
    (investigate units
		 '((*a ?b *b ?b a *a a *b *a)
		   (*a *b *b *a (*a) (*b))
		   (? ? * (b a) * ? ?)))))

;;; Match every stored pattern of every unit against every query
;;; pattern; results are discarded -- the matching work is the benchmark.
;;; BUG FIX: the original fetched (GET 'PATTERN (CAR UNITS)), i.e. a
;;; property of the symbol PATTERN itself, which is always NIL, so the
;;; inner loop never ran.  INIT stores the patterns with
;;; (SETF (GET name 'PATTERN) ...), so they must be fetched with the
;;; unit symbol first: (GET (CAR UNITS) 'PATTERN).
(defun investigate (units pats)
  (do ((units units (cdr units)))
      ((null units))
    (do ((pats pats (cdr pats)))
	((null pats))
      (do ((p (get (car units) 'pattern)
	      (cdr p)))
	  ((null p))
	(match (car pats) (car p) ())))))

;;; call: (browse)

!;;; CTAK -- A version of the TAKeuchi function that uses the CATCH/THROW facility.

;;; Entry point: establish the outermost CTAK catch tag and start the
;;; throwing TAK variant.
(defun ctak (x y z)
  (catch 'ctak (ctak-aux x y z)))

;;; TAK where the base case returns by THROWing to the nearest CTAK
;;; catch; each recursive argument is computed under its own CATCH so
;;; the throw is intercepted there.
(defun ctak-aux (x y z)
  (if (< y x)
      (ctak-aux (catch 'ctak
		  (ctak-aux (1- x) y z))
		(catch 'ctak
		  (ctak-aux (1- y) z x))
		(catch 'ctak
		  (ctak-aux (1- z) x y)))
      (throw 'ctak z)))			;x<=y: deliver the answer

;;; call: (ctak 18. 12. 6.)

!;;; DDERIV -- The Common Lisp version of a symbolic derivative benchmark, written
;;; by Vaughn Pratt.

;;; This benchmark is a variant of the simple symbolic derivative program 
;;; (DERIV). The main change is that it is `table-driven.'  Instead of using a
;;; large COND that branches on the CAR of the expression, this program finds
;;; the code that will take the derivative on the property list of the atom in
;;; the CAR position. So, when the expression is (+ . <rest>), the code
;;; stored under the atom '+ with indicator DERIV will take <rest> and
;;; return the derivative for '+. The way that MacLisp does this is with the
;;; special form: (DEFUN (FOO BAR) ...). This is exactly like DEFUN with an
;;; atomic name in that it expects an argument list and the compiler compiles
;;; code, but the name of the function with that code is stored on the
;;; property list of FOO under the indicator BAR, in this case. You may have
;;; to do something like:

;;; :property keyword is not Common Lisp.

;;; Quotient term d(a)/a used by the product rule.
(defun dderiv-aux (a)
  (cons '/ (list (dderiv a) a)))

;;; Derivative handlers, installed under the DDERIV property of each
;;; operator symbol.  Each receives the operator's argument list.

;; sum rule: d(+ . args) = (+ . d(args))
(defun +-dderiv (args)
  (cons '+ (mapcar #'dderiv args)))
(setf (get '+ 'dderiv) #'+-dderiv)

;; difference rule: d(- . args) = (- . d(args))
(defun --dderiv (args)
  (cons '- (mapcar #'dderiv args)))
(setf (get '- 'dderiv) #'--dderiv)

;; product rule: d(* . args) = (* (* . args) (+ . d(a)/a terms))
(defun *-dderiv (args)
  (list '*
	(cons '* args)
	(cons '+ (mapcar #'dderiv-aux args))))
(setf (get '* 'dderiv) #'*-dderiv)

;; quotient rule for the two-argument case
(defun /-dderiv (args)
  (let ((numer (car args))
	(denom (cadr args)))
    (list '-
	  (list '/ (dderiv numer) denom)
	  (list '/ numer
		(list '* denom denom (dderiv denom))))))
(setf (get '/ 'dderiv) #'/-dderiv)

;;; Table-driven symbolic derivative with respect to X: atoms become
;;; 1 (for X) or 0; compound forms dispatch through the DDERIV
;;; property of their operator, yielding ERROR when none is installed.
(defun dderiv (a)
  (if (atom a)
      (if (eq a 'x) 1 0)
      (let ((handler (get (car a) 'dderiv)))
	(if handler
	    (funcall handler (cdr a))
	    'error))))

;;; Driver: 5000 DDERIV calls (1000 iterations of 5).
(defun dderiv-run ()
  (dotimes (i 1000.)
    (declare (fixnum i))
    (dderiv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (dderiv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (dderiv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (dderiv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (dderiv '(+ (* 3 x x) (* a x x) (* b x) 5))))

;;; call:  (dderiv-run)

!;;; DERIV -- This is the Common Lisp version of a symbolic derivative benchmark
;;; written by Vaughn Pratt.  It uses a simple subset of Lisp and does a lot of 
;;; CONSing. 

;;; Quotient term d(a)/a used by DERIV's product rule.
(defun deriv-aux (a)
  (cons '/ (list (deriv a) a)))

;;; Symbolic derivative with respect to X over {+ - * /}.
;;; Atoms: X => 1, anything else => 0; unknown operators => ERROR.
;;; BUG FIX: the original's final line was (t 'error)))) -- one close
;;; parenthesis too many (only the COND and the DEFUN remain open
;;; there), which made the whole form unreadable.
(defun deriv (a)
  (cond 
    ((atom a)
     ;; dX/dX = 1, d(const)/dX = 0
     (cond ((eq a 'x) 1) (t 0)))
    ((eq (car a) '+)
     ;; sum rule
     (cons '+ (mapcar #'deriv (cdr a))))
    ((eq (car a) '-)
     ;; difference rule
     (cons '- (mapcar #'deriv 
		      (cdr a))))
    ((eq (car a) '*)
     ;; product rule: a * sum of d(arg)/arg terms
     (list '* 
	   a 
	   (cons '+ (mapcar #'deriv-aux (cdr a)))))
    ((eq (car a) '/)
     ;; quotient rule for the two-argument case
     (list '- 
	   (list '/ 
		 (deriv (cadr a)) 
		 (caddr a))
	   (list '/ 
		 (cadr a) 
		 (list '*
		       (caddr a)
		       (caddr a)
		       (deriv (caddr a))))))
    (t 'error)))

;;; Driver: 5000 DERIV calls (1000 iterations of 5).
(defun deriv-run ()
  (dotimes (i 1000.)
    (declare (fixnum i))		;improves the code a little
    (deriv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (deriv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (deriv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (deriv '(+ (* 3 x x) (* a x x) (* b x) 5))
    (deriv '(+ (* 3 x x) (* a x x) (* b x) 5))))

;;; call:  (deriv-run)

!;;; DESTRU -- Destructive operation benchmark

;;; Destructive list-surgery benchmark: keep a 10-element list of
;;; sublists; on each of N iterations either refill all empty sublists
;;; with M fresh NILs (NCONC) or destructively splice roughly half of
;;; each sublist onto the next, stamping cells with I (RPLACA/RPLACD).
(defun destructive (n m)
  (let ((l (do ((i 10. (1- i))
		(a () (push () a)))
	       ((= i 0) a))))
    (do ((i n (1- i)))
	((= i 0))
      (cond ((null (car l))
	     ;; first sublist empty: give every empty sublist M NILs
	     (do ((l l (cdr l)))
		 ((null l))
	       (or (car l) 
		   (rplaca l (cons () ())))
	       (nconc (car l)
		      (do ((j m (1- j))
			   (a () (push () a)))
			  ((= j 0) a))))) 
	    (t
	     ;; splice pass over consecutive sublist pairs (l1, l2)
	     (do ((l1 l (cdr l1))
		  (l2 (cdr l) (cdr l2)))
		 ((null l2))
	       ;; attach the second half of (car l1) after the first
	       ;; half of (car l2), marking traversed cells with I
	       (rplacd (do ((j (floor (length (car l2)) 2) (1- j))
			    (a (car l2) (cdr a)))
			   ((zerop j) a)
			 (rplaca a i))
		       (let ((n (floor (length (car l1)) 2)))
			 (cond ((= n 0) (rplaca l1 ())
				(car l1))
			       (t 
				(do ((j n (1- j))
				     (a (car l1) (cdr a)))
				    ((= j 1)
				     (prog1 (cdr a)
					    (rplacd a ())))
				  (rplaca a i))))))))))))

;;; call:  (destructive 600. 50.)

!;;; DIV2 -- Benchmark which divides by 2 using lists of n ()'s.
;;; This file contains a recursive as well as an iterative test.

;;; Return a fresh list of N NILs.
(defun create-n (n)
  (do ((count n (1- count))
       (result () (cons () result)))
      ((= count 0) result)))

(defvar l (create-n 200.))	; shared 200-element list of NILs used by the DIV2 tests

;;; Collect every other element of L (stepping two CDRs at a time);
;;; the result comes out in reverse order.
(defun iterative-div2 (l)
  (do ((rest l (cddr rest))
       (acc () (cons (car rest) acc)))
      ((null rest) acc)))

;;; Recursively take every other element of L, preserving order.
(defun recursive-div2 (l)
  (if (null l)
      ()
      (cons (car l)
	    (recursive-div2 (cddr l)))))

;;; Time 1200 iterative halvings (300 iterations of 4 calls).
(defun test-1 (l)
  (dotimes (pass 300.)
    (iterative-div2 l)
    (iterative-div2 l)
    (iterative-div2 l)
    (iterative-div2 l)))

;;; Time 1200 recursive halvings (300 iterations of 4 calls).
(defun test-2 (l)
  (dotimes (pass 300.)
    (recursive-div2 l)
    (recursive-div2 l)
    (recursive-div2 l)
    (recursive-div2 l)))

;;; for the iterative test call: (test-1 l)
;;; for the recursive test call: (test-2 l)
	      
!;;; FFT -- This is an FFT benchmark written by Harry Barrow.
;;; It tests a variety of floating point operations, including array references.

;;; Input arrays of 1025 single-floats; FFT below is 1-origin, so
;;; index 0 is never touched by it.
(defvar re (make-array 1025. :element-type 'single-float :initial-element 0.0))	
(defvar im (make-array 1025. :element-type 'single-float :initial-element 0.0))	       

;;; In-place radix-2 FFT over AREAL/AIMAG (1-origin; element 0 unused).
;;; The array length minus one must be a power of two, or the code
;;; stops with a message and waits on READ.
(defun fft					;fast fourier transform
       (areal aimag)				;areal = real part 
  (prog						;aimag = imaginary part
    (ar ai i j k m n le le1 ip nv2 nm1 ur ui wr wi tr ti)
    (setq ar areal				;initialize
          ai aimag
	  n (array-dimension ar 0)
	  n (1- n)
	  nv2 (floor n 2)
	  nm1 (1- n)
	  m 0					;compute m = log(n)
	  i 1)
 l1 (cond ((< i n)
	   (setq m (1+ m)
		 i (+ i i))
	   (go l1)))
    (cond ((not (equal n (expt 2 m)))
	   (princ "error ... array size not a power of two.")
	   (read)
	   (return (terpri))))
    (setq j 1					;interchange elements
	  i 1)					;in bit-reversed order
 l3 (cond ((< i j)
	   (setq tr (aref ar j)
		 ti (aref ai j))
	   (setf (aref ar j) (aref ar i))
	   (setf (aref ai j) (aref ai i))
	   (setf (aref ar i) tr)
	   (setf (aref ai i) ti)))
    ;; advance J to the bit-reversal of the next I
    (setq k nv2)
 l6 (cond ((< k j) 
	   (setq j (- j k)
		 k (/ k 2))
	   (go l6)))
    (setq j (+ j k)
	  i (1+ i))
    (cond ((< i n)
	   (go l3)))
    (do ((l 1 (1+ l)))
	((> l m))			;loop thru stages
      (setq le (expt 2 l)
	    le1 (floor le 2)
	    ur 1.0
	    ui 0.
	    wr (cos (/ pi (float le1)))
	    wi (sin (/ pi (float le1))))
      (do ((j 1 (1+ j)))
	  ((> j le1))		;loop thru butterflies
	(do ((i j (+ i le)))
	    ((> i n))		;do a butterfly
	  (setq ip (+ i le1)
		tr (- (* (aref ar ip) ur)
		      (* (aref ai ip) ui))
		ti (+ (* (aref ar ip) ui)
		      (* (aref ai ip) ur)))
	  (setf (aref ar ip) (- (aref ar i) tr))
	  (setf (aref ai ip) (- (aref ai i) ti))
	  (setf (aref ar i) (+ (aref ar i) tr))
	  (setf (aref ai i) (+ (aref ai i) ti))))
	;; rotate the twiddle factor (ur,ui) by (wr,wi)
	(setq tr (- (* ur wr) (* ui wi))
	      ti (+ (* ur wi) (* ui wr))
	      ur tr
	      ui ti))
    (return t)))

;;; the timer which does 10 calls on fft

;;; Expands into a loop performing 10 FFT calls on the shared arrays.
(defmacro fft-bench ()
  '(do ((ntimes 0 (1+ ntimes)))
      ((= ntimes 10.))
    (fft re im)))

;;; call:  (fft-bench)

!;;; FPRINT -- Benchmark to print to a file.

;;; Pool of eight-character atoms from which the printed tree is built.
(defvar test-atoms '(abcdef12 cdefgh23 efghij34 ghijkl45 ijklmn56 klmnop67 
			      mnopqr78 opqrst89 qrstuv90 stuvwx01 uvwxyz12 
			      wxyzab23 xyzabc34 123456ab 234567bc 345678cd 
			      456789de 567890ef 678901fg 789012gh 890123hi))

;;; Build a depth-M tree for printing: at depth 0 take a single atom,
;;; otherwise collect N/2 atom/subtree pairs drawn from the (circular)
;;; ATOMS list.
(defun fprint-init-aux (m n atoms)
  (cond ((= m 0) (pop atoms))
	(t (do ((i n (- i 2))
		(a ()))
	       ((< i 1) a)
	     (push (pop atoms) a)
	     (push (fprint-init-aux (1- m) n atoms) a)))))

;;; Copy ATOMS (SUBST with a no-op substitution serves as the copier),
;;; make the copy circular, and build the depth-M test tree from it.
(defun fprint-init (m n atoms)
  (let ((atoms (subst () () atoms)))
    (do ((a atoms (cdr a)))
	((null (cdr a)) (rplacd a atoms)))
    (fprint-init-aux m n atoms)))

(defvar test-pattern (fprint-init 6. 6. test-atoms))	; the tree FPRINT writes out

;;; Print the test pattern to FPRINT.TST.  The leading quoted IF is
;;; deliberately dead code (see the original remark): the file is not
;;; deleted first, so subsequent runs may be slower.
(defun fprint ()
  '(if (probe-file "fprint.tst")			; this seems a little weird, subsequent calls to FPRINT will be slower
      (delete-file "fprint.tst"))
  (let ((stream (open "fprint.tst" :direction :output)))  ;defaults to STRING-CHAR
    (print test-pattern stream)
    (close stream)))

;; Also quoted out (a no-op at load time); kept for reference.
'(eval-when (compile load eval)
  (if (probe-file "fprint.tst")
      (delete-file "fprint.tst")))

;;; call:  (fprint)

!;;; FREAD -- Benchmark to read from a file.
;;; Pronounced "FRED".  Requires the existence of FPRINT.TST which is created
;;; by FPRINT.

;;; Read the tree written by FPRINT back from FPRINT.TST; the value
;;; returned is that of CLOSE.
(defun fread ()
  (let ((stream (open "fprint.tst" :direction :input)))
    (read stream)
    (close stream)))
	    
;; Quoted out (a no-op at load time); would warn if the input file is
;; missing.
'(eval-when (compile load eval)
  (if (not (probe-file "fprint.tst"))
      (format t "~%Define FPRINT.TST by running the FPRINT benchmark!")))

;;; call: (fread)

!;;; FRPOLY -- Benchmark from Berkeley based on polynomial arithmetic.
;;; Originally written in Franz Lisp by Richard Fateman.
;;; PDIFFER1 appears in the code, but is not defined; is not called for in this
;;; test, however.

;;; Special variables used free by the FRPOLY routines -- notably U*,
;;; V and *X*, shared between PTIMES1/PTIMES2/PTIMES3 -- plus the test
;;; polynomials R, R2, R3 built by the load-time setup below.
(defvar ans)
(defvar coef)
(defvar f)
(defvar inc)
(defvar i)
(defvar qq)
(defvar ss)
(defvar v)
(defvar *x*)
(defvar *alpha*)
(defvar *a*)
(defvar *b*)
(defvar *chk)
(defvar *l)
(defvar *p)
(defvar q*)
(defvar u*)
(defvar *var)
(defvar *y*)
(defvar r)
(defvar r2)
(defvar r3)
(defvar start)
(defvar res1)
(defvar res2)
(defvar res3)

;;; Representation primitives: a polynomial is either a coefficient
;;; (an atom) or (var exp1 coef1 exp2 coef2 ...) with exponents in
;;; decreasing order.  POINTERGP orders variables by their ORDER
;;; property.
(defmacro pointergp (x y) `(> (get ,x 'order)(get ,y 'order)))
(defmacro pcoefp (e) `(atom ,e))

;; PZEROP returns NIL (not an error) on non-numbers
(defmacro pzerop (x) 
  `(if (numberp ,x) 					; no signp in CL	
       (zerop ,x)))		      
(defmacro pzero () 0)
(defmacro cplus (x y) `(+ ,x ,y))
(defmacro ctimes (x y) `(* ,x ,y))

;;; Prepend exponent E / coefficient C to the term list X, dropping
;;; zero coefficients entirely.
(defun pcoefadd (e c x)
  (cond ((pzerop c) x)
	(t (cons e (cons c x)))))

;;; Add coefficient C to polynomial P.
(defun pcplus (c p)
  (cond ((pcoefp p) (cplus p c))
	(t (psimp (car p)
		  (pcplus1 c (cdr p))))))

;;; Fold coefficient C into the exponent-0 (constant) term of the
;;; exponent/coefficient list X, which is sorted by decreasing exponent.
(defun pcplus1 (c x)
  (if (null x)
      (if (pzerop c)
	  nil
	  (list 0 c))
      (if (pzerop (car x))
	  (pcoefadd 0 (pplus c (cadr x)) nil)
	  (cons (car x)
		(cons (cadr x)
		      (pcplus1 c (cddr x)))))))

;;; Multiply polynomial P by coefficient C.
(defun pctimes (c p)
  (cond ((pcoefp p) (ctimes c p))
	(t (psimp (car p)
		  (pctimes1 c (cdr p))))))

;;; Multiply every coefficient in term list X by C, dropping any
;;; products that come out zero.
(defun pctimes1 (c x)
  (cond ((null x) nil)
	(t (pcoefadd (car x)
		     (ptimes c (cadr x))
		     (pctimes1 c (cddr x))))))

;;; Polynomial addition, dispatching on coefficient cases and on the
;;; ordering of the two main variables.
(defun pplus (x y)
  (if (pcoefp x)
      (pcplus x y)
      (if (pcoefp y)
	  (pcplus y x)
	  (cond ((eq (car x) (car y))
		 (psimp (car x) (pplus1 (cdr y) (cdr x))))
		((pointergp (car x) (car y))
		 (psimp (car x) (pcplus1 y (cdr x))))
		(t
		 (psimp (car y) (pcplus1 x (cdr y))))))))

;;; Merge two exponent/coefficient lists (both sorted by decreasing
;;; exponent), adding coefficients where exponents coincide.
(defun pplus1 (x y)
  (cond ((null x) y)
	((null y) x)
	((= (car x) (car y))
	 (pcoefadd (car x)
		   (pplus (cadr x) (cadr y))
		   (pplus1 (cddr x) (cddr y))))
	((> (car x) (car y))
	 (list* (car x) (cadr x) (pplus1 (cddr x) y)))
	(t
	 (list* (car y) (cadr y) (pplus1 x (cddr y))))))

;;; Normalize a term list into a polynomial: () => 0, a bare atom
;;; stays, a leading exponent-0 term collapses to its coefficient,
;;; anything else gets VAR attached in front.
(defun psimp (var x)
  (if (null x)
      0
      (if (atom x)
	  x
	  (if (zerop (car x))
	      (cadr x)
	      (cons var x)))))

;;; Polynomial multiplication, dispatching on zero, coefficient, and
;;; variable-ordering cases.
(defun ptimes (x y)
  (if (or (pzerop x) (pzerop y))
      (pzero)
      (cond ((pcoefp x) (pctimes x y))
	    ((pcoefp y) (pctimes y x))
	    ((eq (car x) (car y))
	     (psimp (car x) (ptimes1 (cdr x) (cdr y))))
	    ((pointergp (car x) (car y))
	     (psimp (car x) (pctimes1 y (cdr x))))
	    (t
	     (psimp (car y) (pctimes1 x (cdr y)))))))

;;; Multiply term list *X* by term list Y.  The first partial product
;;; is built functionally by PTIMES2; the rest are merged
;;; destructively into the accumulator by PTIMES3.  *X*, U* and V are
;;; special variables shared with PTIMES2/PTIMES3.
(defun ptimes1 (*x* y) 
  (prog (u* v)
	(setq v (setq u* (ptimes2 y)))
     a  
	(setq *x* (cddr *x*))
	(if (null *x*)
	    (return u*))
	(ptimes3 y)
	(go a)))

;;; Multiply the current leading term of *X* (special) by every term
;;; of Y, building a fresh exponent/coefficient list.
(defun ptimes2 (y)
  (if (null y)
      nil
      (pcoefadd (+ (car *x*) (car y))
		(ptimes (cadr *x*) (cadr y))
		(ptimes2 (cddr y)))))

;;; Destructively merge the products of *X*'s current leading term
;;; with each term of Y into the accumulator U*/V (specials).  V
;;; tracks the merge position so Y's decreasing exponents avoid
;;; rescanning from the front.  PDIFFER1 (the cancellation case) is
;;; not defined in this file; per the section header that branch is
;;; never taken by this test.
(defun ptimes3 (y) 
  (prog (e u c) 
     a1	(if (null y) 
	    (return nil))
	(setq e (+ (car *x*) (car y))
	      c (ptimes (cadr y) (cadr *x*) ))
	(cond ((pzerop c)
	       (setq y (cddr y)) 
	       (go a1))
	      ;; new highest exponent: cons onto the front via PPLUS1
	      ((or (null v) (> e (car v)))
	       (setq u* (setq v (pplus1 u* (list e c))))
	       (setq y (cddr y))
	       (go a1))
	      ;; same exponent as V's head: add coefficients in place
	      ((= e (car v))
	       (setq c (pplus c (cadr v)))
	       (if (pzerop c) 			; never true, evidently
		   (setq u* (setq v (pdiffer1 u* (list (car v) (cadr v)))))
		   (rplaca (cdr v) c))
	       (setq y (cddr y))
	       (go a1)))
	;; advance V past terms with larger exponents
     a  (cond ((and (cddr v) (> (caddr v) e))
	       (setq v (cddr v))
	       (go a)))
	(setq u (cdr v))
	;; insert or combine (e c) at position U
     b  (if (or (null (cdr u)) (< (cadr u) e))
	    (rplacd u (cons e (cons c (cdr u)))) (go e))
	(cond ((pzerop (setq c (pplus (caddr u) c)))
	       (rplacd u (cdddr u))
	       (go d))
	      (t
	       (rplaca (cddr u) c)))
     e  (setq u (cddr u))
     d  (setq y (cddr y))
	(if (null y)
	    (return nil))
	(setq e (+ (car *x*) (car y))
	      c (ptimes (cadr y) (cadr *x*)))
	;; resume scanning from U for the next (smaller) exponent
     c  (cond ((and (cdr u) (> (cadr u) e))
	       (setq u (cddr u))
	       (go c)))
	(go b))) 

;;; Raise polynomial P to the N-th power by repeated squaring.
;;; NOTE: DO evaluates both init forms before binding, so the
;;; (ODDP N) in S's init form sees the original N, not the halved one.
(defun pexptsq (p n)
  (do ((n (floor n 2) (floor n 2))
       (s (if (oddp n) p 1)))
      ((zerop n) s)
    (setq p (ptimes p p))
    (and (oddp n) (setq s (ptimes s p)))))

;;; Load-time setup: establish the variable ordering x < y < z and
;;; build the three benchmark polynomials.
(eval-when (load eval)
  (setf (get 'x 'order) 1)
  (setf (get 'y 'order) 2)
  (setf (get 'z 'order) 3)
  (setq r (pplus '(x 1 1 0 1) (pplus '(y 1 1) '(z 1 1)))	; r= x+y+z+1
	r2 (ptimes r 100000)				 	; r2 = 100000*r
	r3 (ptimes r 1.0)))					; r3 = r with floating point coefficients	

;;; four sets of three tests, call:  (pexptsq r 2) (pexptsq r2 2) (pexptsq r3 2)
;				     (pexptsq r 5) (pexptsq r2 5) (pexptsq r3 5)
;				     (pexptsq r 10) (pexptsq r2 10) (pexptsq r3 10)
;				     (pexptsq r 15) (pexptsq r2 15) (pexptsq r3 15)
;
!;;; PUZZLE -- Forest Baskett's Puzzle benchmark, originally written in Pascal.

;;; Board and piece-table dimensions, fixed at compile time.
(eval-when (compile load eval)
  (defconstant size 511.)	
  (defconstant classmax 3.)
  (defconstant typemax 12.))

(defvar iii 0)		; next piece-type index filled by DEFINEPIECE
(defvar kount 0)	; search-node counter maintained by TRIAL
(defvar d 8.)		; board edge length (board cells indexed i + d*(j + d*k))

(defvar piececount (make-array (1+ classmax) :initial-element 0))	; pieces remaining per class
(defvar class (make-array (1+ typemax) :initial-element 0))		; class of each piece type
(defvar piecemax (make-array (1+ typemax) :initial-element 0))		; last occupied cell index per type
(defvar puzzle (make-array (1+ size)))					; the board; T = occupied
(defvar p (make-array (list (1+ typemax) (1+ size))))			; piece shapes, one row per type

;;; Can piece I be placed at board offset J?  True unless one of the
;;; piece's occupied cells overlaps an occupied board cell.
(defun fit (i j)
  (let ((end (aref piecemax i)))
    (do ((k 0 (1+ k)))
	((> k end) t)
      (and (aref p i k)
	   (aref puzzle (+ j k))
	   (return nil)))))

;;; Place piece I at board offset J: mark its cells occupied, debit
;;; its class count once, and return the index of the first free board
;;; cell at or after J (0 when none remains, i.e. the board is full).
(defun place (i j)
  (let ((end (aref piecemax i)))
    (do ((k 0 (1+ k)))
	((> k end))
      (cond ((aref p i k) 
	     (setf (aref puzzle (+ j k)) t))))
    (setf (aref piececount (aref class i)) (- (aref piececount (aref class i)) 1))
    (do ((k j (1+ k)))
	((> k size)
;	 (terpri)
;	 (princ "Puzzle filled")
	 0)
      (cond ((not (aref puzzle k))
	     (return k))))))

;;; Undo PLACE: clear piece I's cells starting at board offset J and
;;; return the piece to its class's available count.
;;; BUG FIX: the original incremented PIECECOUNT inside the per-cell
;;; loop (END+1 times per removal) instead of once, which is
;;; asymmetric with PLACE's single decrement.  The increment now runs
;;; once, after the loop.
(defun puzzle-remove (i j)
  (let ((end (aref piecemax i)))
    (do ((k 0 (1+ k)))
	((> k end))
      (cond ((aref p i k) (setf (aref puzzle (+ j k))  nil))))
    (setf (aref piececount (aref class i))
	  (+ (aref piececount (aref class i)) 1))))

;;; Backtracking search from board offset J: try every piece type that
;;; still has pieces available; return T on success, NIL when every
;;; type fails here.  KOUNT is bumped both on success and on
;;; exhausting the type loop, counting visited search nodes.
(defun trial (j)
  (let ((k 0))
    (do ((i 0 (1+ i)))
	((> i typemax) (setq kount (1+ kount)) 	 nil)
      (cond ((not (= (aref piececount (aref class i)) 0))
	     (cond ((fit i j)
		    (setq k (place i j))
		    ;; K = 0 means PLACE filled the board completely
		    (cond ((or (trial k)
			       (= k 0))
;			   (format t "~%Piece ~4D at ~4D." (+ i 1) (+ k 1))
			   (setq kount (+ kount 1))
			   (return t))
			  (t (puzzle-remove i j))))))))))

;;; Register a piece of class ICLASS spanning II x JJ x KK board steps
;;; (inclusive): fill row III of the shape table P and record the
;;; piece's class and extent, then advance III to the next free slot.
(defun definepiece (iclass ii jj kk)
  (let ((index 0))
    (do ((i 0 (1+ i)))
	((> i ii))
      (do ((j 0 (1+ j)))
	  ((> j jj))
	(do ((k 0 (1+ k)))
	    ((> k kk))
	  (setq index  (+ i (* d (+ j (* d k)))))
	  (setf (aref p iii index)  t))))
    (setf (aref class iii) iclass)
    (setf (aref piecemax iii) index) 
    (cond ((not (= iii typemax))
	   (setq iii (+ iii 1))))))

;;; Set up and run the puzzle: occupy the whole board, free the
;;; interior 5x5x5 region, define the 13 piece types with their
;;; per-class counts, place the first piece, and run the backtracking
;;; search, reporting the trial count.  (The local KOUNT rebinds the
;;; special variable that TRIAL updates.)
(defun start ()
  (do ((m 0 (1+ m)))
      ((> m size))
    (setf (aref puzzle m) t))
  (do ((i 1 (1+ i)))
      ((> i 5))
    (do ((j 1 (1+ j)))
	((> j 5))
      (do ((k 1 (1+ k)))
	  ((> k 5))
	(setf (aref puzzle (+ i (* d (+ j (* d k))))) nil))))
  (do ((i 0 (1+ i)))
      ((> i typemax))
    (do ((m 0 (1+ m)))
	((> m size))
      (setf (aref p i m)  nil)))
  (setq iii 0)
  (definePiece 0 3 1 0)
  (definePiece 0 1 0 3)
  (definePiece 0 0 3 1)
  (definePiece 0 1 3 0)
  (definePiece 0 3 0 1)
  (definePiece 0 0 1 3)
  
  (definePiece 1 2 0 0)
  (definePiece 1 0 2 0)
  (definePiece 1 0 0 2)
  
  (definePiece 2 1 1 0)
  (definePiece 2 1 0 1)
  (definePiece 2 0 1 1)
  
  (definePiece 3 1 1 1)
  
  (setf (aref pieceCount 0) 13.)
  (setf (aref pieceCount 1) 3)
  (setf (aref pieceCount 2) 1)
  (setf (aref pieceCount 3) 1)
  (let ((m (+ 1 (* d (+ 1 d))))
	(n 0)(kount 0))
    (cond ((fit 0 m) (setq n (place 0 m)))
	  (t (format t "~%Error.")))
    (cond ((trial n) 
	   (format t "~%Success in ~4D trials." kount))
	  (t (format t "~%Failure.")))))

;;; call:  (start)


!;;; TAK -- A vanilla version of the TAKeuchi function and one with tail recursion
;;; removed.

;;; The Takeuchi function: a heavily recursive call benchmark.
;;; Returns Z as soon as X is not greater than Y.
(defun tak (x y z)
  (cond ((< y x)
	 (tak (tak (1- x) y z)
	      (tak (1- y) z x)
	      (tak (1- z) x y)))
	(t z)))


;;; TAK with the outermost tail call turned into a loop (PROG + GO);
;;; the three inner calls still use the plain recursive TAK above.
(defun trtak (x y z)
  (prog ()
     tak
	(if (not (< y x))
	    (return z)
	    (let ((a (tak (1- x) y z))
		  (b (tak (1- y) z x)))
	      (setq z (tak (1- z) x y)
		    x a
		    y b)
	      (go tak)))))

;;; call:  (tak 18. 12. 6.)


!;;; TAKL -- The TAKeuchi function using lists as counters.

;;; List of the integers N, N-1, ..., 1 (NIL when N is 0).
(defun listn (n)
  (unless (= 0 n)
    (cons n (listn (1- n)))))

;;; Unary-arithmetic arguments for MAS.  NOTE(review): names like 18L
;;; rely on the reader treating them as symbols (18L is not a valid CL
;;; number since a long-float marker needs exponent digits).
(defvar 18l (listn 18.))
(defvar 12l (listn 12.))
(defvar  6l (listn 6.))

;;; TAK in unary list arithmetic: CDR plays "subtract 1" and SHORTERP
;;; plays "less than".
(defun mas (x y z)
  (cond ((shorterp y x)
	 (mas (mas (cdr x) y z)
	      (mas (cdr y) z x)
	      (mas (cdr z) x y)))
	(t z)))

;;; Is list X strictly shorter than list Y?
(defun shorterp (x y)
  (cond ((null y) nil)
	((null x) t)
	(t (shorterp (cdr x) (cdr y)))))

;;; call: (mas 18l 12l 6l)


!;;; TAKR  -- 100 function (count `em) version of TAK that tries to defeat cache
;;; memory effects.  Results should be the same as for TAK on stack machines.
;;; Distribution of calls is not completely flat.

;;; call:  (tak0 18. 12. 6.)	

;;; TAK0 ... TAK76: mutually recursive copies of TAK.  Each TAKn
;;; recurses through four other members of the family, spreading the
;;; call sites across many distinct functions to defeat instruction
;;; caches.  All compute the same values as TAK.
(defun tak0 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak1 (tak37 (1- x) y z)
		 (tak11 (1- y) z x)
		 (tak17 (1- z) x y)))))
(defun tak1 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak2 (tak74 (1- x) y z)
		 (tak22 (1- y) z x)
		 (tak34 (1- z) x y)))))
(defun tak2 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak3 (tak11 (1- x) y z)
		 (tak33 (1- y) z x)
		 (tak51 (1- z) x y)))))
(defun tak3 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak4 (tak48 (1- x) y z)
		 (tak44 (1- y) z x)
		 (tak68 (1- z) x y)))))
(defun tak4 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak5 (tak85 (1- x) y z)
		 (tak55 (1- y) z x)
		 (tak85 (1- z) x y)))))
(defun tak5 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak6 (tak22 (1- x) y z)
		 (tak66 (1- y) z x)
		 (tak2 (1- z) x y)))))
(defun tak6 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak7 (tak59 (1- x) y z)
		 (tak77 (1- y) z x)
		 (tak19 (1- z) x y)))))
(defun tak7 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak8 (tak96 (1- x) y z)
		 (tak88 (1- y) z x)
		 (tak36 (1- z) x y)))))
(defun tak8 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak9 (tak33 (1- x) y z)
		 (tak99 (1- y) z x)
		 (tak53 (1- z) x y)))))
(defun tak9 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak10 (tak70 (1- x) y z)
		  (tak10 (1- y) z x)
		  (tak70 (1- z) x y)))))
(defun tak10 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak11 (tak7 (1- x) y z)
		  (tak21 (1- y) z x)
		  (tak87 (1- z) x y)))))
(defun tak11 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak12 (tak44 (1- x) y z)
		  (tak32 (1- y) z x)
		  (tak4 (1- z) x y)))))
(defun tak12 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak13 (tak81 (1- x) y z)
		  (tak43 (1- y) z x)
		  (tak21 (1- z) x y)))))

(defun tak13 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak14 (tak18 (1- x) y z)
		  (tak54 (1- y) z x)
		  (tak38 (1- z) x y)))))
(defun tak14 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak15 (tak55 (1- x) y z)
		  (tak65 (1- y) z x)
		  (tak55 (1- z) x y)))))
(defun tak15 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak16 (tak92 (1- x) y z)
		  (tak76 (1- y) z x)
		  (tak72 (1- z) x y)))))
(defun tak16 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak17 (tak29 (1- x) y z)
		  (tak87 (1- y) z x)
		  (tak89 (1- z) x y)))))
(defun tak17 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak18 (tak66 (1- x) y z)
		  (tak98 (1- y) z x)
		  (tak6 (1- z) x y)))))
(defun tak18 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak19 (tak3 (1- x) y z)
		  (tak9 (1- y) z x)
		  (tak23 (1- z) x y)))))
(defun tak19 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak20 (tak40 (1- x) y z)
		  (tak20 (1- y) z x)
		  (tak40 (1- z) x y)))))
(defun tak20 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak21 (tak77 (1- x) y z)
		  (tak31 (1- y) z x)
		  (tak57 (1- z) x y)))))
(defun tak21 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak22 (tak14 (1- x) y z)
		  (tak42 (1- y) z x)
		  (tak74 (1- z) x y)))))
(defun tak22 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak23 (tak51 (1- x) y z)
		  (tak53 (1- y) z x)
		  (tak91 (1- z) x y)))))
(defun tak23 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak24 (tak88 (1- x) y z)
		  (tak64 (1- y) z x)
		  (tak8 (1- z) x y)))))
(defun tak24 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak25 (tak25 (1- x) y z)
		  (tak75 (1- y) z x)
		  (tak25 (1- z) x y)))))
(defun tak25 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak26 (tak62 (1- x) y z)
		  (tak86 (1- y) z x)
		  (tak42 (1- z) x y)))))
(defun tak26 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak27 (tak99 (1- x) y z)
		  (tak97 (1- y) z x)
		  (tak59 (1- z) x y)))))
(defun tak27 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak28 (tak36 (1- x) y z)
		  (tak8 (1- y) z x)
		  (tak76 (1- z) x y)))))
(defun tak28 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak29 (tak73 (1- x) y z)
		  (tak19 (1- y) z x)
		  (tak93 (1- z) x y)))))
(defun tak29 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak30 (tak10 (1- x) y z)
		  (tak30 (1- y) z x)
		  (tak10 (1- z) x y)))))
(defun tak30 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak31 (tak47 (1- x) y z)
		  (tak41 (1- y) z x)
		  (tak27 (1- z) x y)))))
(defun tak31 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak32 (tak84 (1- x) y z)
		  (tak52 (1- y) z x)
		  (tak44 (1- z) x y)))))
(defun tak32 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak33 (tak21 (1- x) y z)
		  (tak63 (1- y) z x)
		  (tak61 (1- z) x y)))))
(defun tak33 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak34 (tak58 (1- x) y z)
		  (tak74 (1- y) z x)
		  (tak78 (1- z) x y)))))
(defun tak34 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak35 (tak95 (1- x) y z)
		  (tak85 (1- y) z x)
		  (tak95 (1- z) x y)))))
(defun tak35 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak36 (tak32 (1- x) y z)
		  (tak96 (1- y) z x)
		  (tak12 (1- z) x y)))))
(defun tak36 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak37 (tak69 (1- x) y z)
		  (tak7 (1- y) z x)
		  (tak29 (1- z) x y)))))
(defun tak37 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak38 (tak6 (1- x) y z)
		  (tak18 (1- y) z x)
		  (tak46 (1- z) x y)))))
(defun tak38 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak39 (tak43 (1- x) y z)
		  (tak29 (1- y) z x)
		  (tak63 (1- z) x y)))))
(defun tak39 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak40 (tak80 (1- x) y z)
		  (tak40 (1- y) z x)
		  (tak80 (1- z) x y)))))
(defun tak40 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak41 (tak17 (1- x) y z)
		  (tak51 (1- y) z x)
		  (tak97 (1- z) x y)))))
(defun tak41 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak42 (tak54 (1- x) y z)
		  (tak62 (1- y) z x)
		  (tak14 (1- z) x y)))))
(defun tak42 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak43 (tak91 (1- x) y z)
		  (tak73 (1- y) z x)
		  (tak31 (1- z) x y)))))
(defun tak43 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak44 (tak28 (1- x) y z)
		  (tak84 (1- y) z x)
		  (tak48 (1- z) x y)))))
(defun tak44 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak45 (tak65 (1- x) y z)
		  (tak95 (1- y) z x)
		  (tak65 (1- z) x y)))))
(defun tak45 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak46 (tak2 (1- x) y z)
		  (tak6 (1- y) z x)
		  (tak82 (1- z) x y)))))
(defun tak46 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak47 (tak39 (1- x) y z)
		  (tak17 (1- y) z x)
		  (tak99 (1- z) x y)))))
(defun tak47 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak48 (tak76 (1- x) y z)
		  (tak28 (1- y) z x)
		  (tak16 (1- z) x y)))))
(defun tak48 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak49 (tak13 (1- x) y z)
		  (tak39 (1- y) z x)
		  (tak33 (1- z) x y)))))
;; tak49 funnels all three recursive arguments through tak50
(defun tak49 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak50 (tak50 (1- x) y z)
		  (tak50 (1- y) z x)
		  (tak50 (1- z) x y)))))
(defun tak50 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak51 (tak87 (1- x) y z)
		  (tak61 (1- y) z x)
		  (tak67 (1- z) x y)))))
(defun tak51 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak52 (tak24 (1- x) y z)
		  (tak72 (1- y) z x)
		  (tak84 (1- z) x y)))))
(defun tak52 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak53 (tak61 (1- x) y z)
		  (tak83 (1- y) z x)
		  (tak1 (1- z) x y)))))
(defun tak53 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak54 (tak98 (1- x) y z)
		  (tak94 (1- y) z x)
		  (tak18 (1- z) x y)))))
(defun tak54 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak55 (tak35 (1- x) y z)
		  (tak5 (1- y) z x)
		  (tak35 (1- z) x y)))))
(defun tak55 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak56 (tak72 (1- x) y z)
		  (tak16 (1- y) z x)
		  (tak52 (1- z) x y)))))
(defun tak56 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak57 (tak9 (1- x) y z)
		  (tak27 (1- y) z x)
		  (tak69 (1- z) x y)))))
(defun tak57 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak58 (tak46 (1- x) y z)
		  (tak38 (1- y) z x)
		  (tak86 (1- z) x y)))))
(defun tak58 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak59 (tak83 (1- x) y z)
		  (tak49 (1- y) z x)
		  (tak3 (1- z) x y)))))
(defun tak59 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak60 (tak20 (1- x) y z)
		  (tak60 (1- y) z x)
		  (tak20 (1- z) x y)))))
(defun tak60 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak61 (tak57 (1- x) y z)
		  (tak71 (1- y) z x)
		  (tak37 (1- z) x y)))))
(defun tak61 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak62 (tak94 (1- x) y z)
		  (tak82 (1- y) z x)
		  (tak54 (1- z) x y)))))
(defun tak62 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak63 (tak31 (1- x) y z)
		  (tak93 (1- y) z x)
		  (tak71 (1- z) x y)))))
(defun tak63 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak64 (tak68 (1- x) y z)
		  (tak4 (1- y) z x)
		  (tak88 (1- z) x y)))))
(defun tak64 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak65 (tak5 (1- x) y z)
		  (tak15 (1- y) z x)
		  (tak5 (1- z) x y)))))
(defun tak65 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak66 (tak42 (1- x) y z)
		  (tak26 (1- y) z x)
		  (tak22 (1- z) x y)))))
(defun tak66 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak67 (tak79 (1- x) y z)
		  (tak37 (1- y) z x)
		  (tak39 (1- z) x y)))))
(defun tak67 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak68 (tak16 (1- x) y z)
		  (tak48 (1- y) z x)
		  (tak56 (1- z) x y)))))
(defun tak68 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak69 (tak53 (1- x) y z)
		  (tak59 (1- y) z x)
		  (tak73 (1- z) x y)))))
(defun tak69 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak70 (tak90 (1- x) y z)
		  (tak70 (1- y) z x)
		  (tak90 (1- z) x y)))))
(defun tak70 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak71 (tak27 (1- x) y z)
		  (tak81 (1- y) z x)
		  (tak7 (1- z) x y)))))
(defun tak71 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak72 (tak64 (1- x) y z)
		  (tak92 (1- y) z x)
		  (tak24 (1- z) x y)))))
(defun tak72 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak73 (tak1 (1- x) y z)
		  (tak3 (1- y) z x)
		  (tak41 (1- z) x y)))))
(defun tak73 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak74 (tak38 (1- x) y z)
		  (tak14 (1- y) z x)
		  (tak58 (1- z) x y)))))
(defun tak74 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak75 (tak75 (1- x) y z)
		  (tak25 (1- y) z x)
		  (tak75 (1- z) x y)))))
(defun tak75 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak76 (tak12 (1- x) y z)
		  (tak36 (1- y) z x)
		  (tak92 (1- z) x y)))))
(defun tak76 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak77 (tak49 (1- x) y z)
		  (tak47 (1- y) z x)
		  (tak9 (1- z) x y)))))
(defun tak77 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak78 (tak86 (1- x) y z)
		  (tak58 (1- y) z x)
		  (tak26 (1- z) x y)))))
(defun tak78 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak79 (tak23 (1- x) y z)
		  (tak69 (1- y) z x)
		  (tak43 (1- z) x y)))))
(defun tak79 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak80 (tak60 (1- x) y z)
		  (tak80 (1- y) z x)
		  (tak60 (1- z) x y)))))
(defun tak80 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak81 (tak97 (1- x) y z)
		  (tak91 (1- y) z x)
		  (tak77 (1- z) x y)))))
(defun tak81 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak82 (tak34 (1- x) y z)
		  (tak2 (1- y) z x)
		  (tak94 (1- z) x y)))))
(defun tak82 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak83 (tak71 (1- x) y z)
		  (tak13 (1- y) z x)
		  (tak11 (1- z) x y)))))
(defun tak83 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak84 (tak8 (1- x) y z)
		  (tak24 (1- y) z x)
		  (tak28 (1- z) x y)))))
(defun tak84 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak85 (tak45 (1- x) y z)
		  (tak35 (1- y) z x)
		  (tak45 (1- z) x y)))))
(defun tak85 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak86 (tak82 (1- x) y z)
		  (tak46 (1- y) z x)
		  (tak62 (1- z) x y)))))
(defun tak86 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak87 (tak19 (1- x) y z)
		  (tak57 (1- y) z x)
		  (tak79 (1- z) x y)))))
(defun tak87 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak88 (tak56 (1- x) y z)
		  (tak68 (1- y) z x)
		  (tak96 (1- z) x y)))))
(defun tak88 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak89 (tak93 (1- x) y z)
		  (tak79 (1- y) z x)
		  (tak13 (1- z) x y)))))
(defun tak89 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak90 (tak30 (1- x) y z)
		  (tak90 (1- y) z x)
		  (tak30 (1- z) x y)))))
(defun tak90 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak91 (tak67 (1- x) y z)
		  (tak1 (1- y) z x)
		  (tak47 (1- z) x y)))))
(defun tak91 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak92 (tak4 (1- x) y z)
		  (tak12 (1- y) z x)
		  (tak64 (1- z) x y)))))
(defun tak92 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak93 (tak41 (1- x) y z)
		  (tak23 (1- y) z x)
		  (tak81 (1- z) x y)))))
(defun tak93 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak94 (tak78 (1- x) y z)
		  (tak34 (1- y) z x)
		  (tak98 (1- z) x y)))))
(defun tak94 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak95 (tak15 (1- x) y z)
		  (tak45 (1- y) z x)
		  (tak15 (1- z) x y)))))
(defun tak95 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak96 (tak52 (1- x) y z)
		  (tak56 (1- y) z x)
		  (tak32 (1- z) x y)))))
(defun tak96 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak97 (tak89 (1- x) y z)
		  (tak67 (1- y) z x)
		  (tak49 (1- z) x y)))))
(defun tak97 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak98 (tak26 (1- x) y z)
		  (tak78 (1- y) z x)
		  (tak66 (1- z) x y)))))
(defun tak98 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak99 (tak63 (1- x) y z)
		  (tak89 (1- y) z x)
		  (tak83 (1- z) x y)))))
(defun tak99 (x y z) 
  (cond ((not (< y x)) z)
	(t (tak0 (tak0 (1- x) y z)
		 (tak0 (1- y) z x)
		 (tak0 (1- z) x y)))))
!;;; STAK -- The TAKeuchi function with special variables instead of parameter passing.

;; X, Y and Z are special (dynamic) variables: STAK passes its arguments
;; to STAK-AUX via dynamic rebinding instead of parameter passing -- the
;; cost of that rebinding is what this benchmark measures.
(defvar x)
(defvar y)
(defvar z)

;; Entry point.  Because X, Y, Z are DEFVARed above, this lambda list
;; dynamically rebinds the specials for the duration of the call.
(defun stak (x y z)
  (stak-aux))

(defun stak-aux ()
  ;; Reads X, Y, Z from the current dynamic bindings.  Each recursive
  ;; argument is produced by an inner LET that rebinds all three specials
  ;; around one STAK-AUX call.  LET evaluates all its init forms before
  ;; any binding takes effect, so (y z), (z x), (y x) etc. read the
  ;; *outer* values -- LET* here would change the result.  The outer LET
  ;; then rebinds X, Y, Z to the three partial results for the final call.
  (if (not (< y x))				; x≤y
      z
      (let ((x (let ((x (1- x))
		     (y y)
		     (z z))
		 (stak-aux)))
	    (y (let ((x (1- y))
		     (y z)
		     (z x))
		 (stak-aux)))
	    (z (let ((x (1- z))
		     (y x)
		     (z y))
		 (stak-aux))))
	(stak-aux))))

;;; call:  (stak 18. 12. 6.)

;; Clear any property lists hanging off X, Y and Z at compile, load and
;; eval time.  NOTE(review): presumably this keeps symbol access costs
;; uniform across Lisps that store information on plists -- confirm
;; against the benchmark's write-up.
(eval-when (compile load eval)
  (setf (symbol-plist 'x) nil
	(symbol-plist 'y) nil
	(symbol-plist 'z) nil))

!;;; TPRINT -- Benchmark to print and read to the terminal.

;; Pool of 21 short symbols used as the leaves of the printed test pattern.
(defvar test-atoms '(abc1 cde2 efg3 ghi4 ijk5 klm6 mno7 opq8 qrs9
			  stu0 uvw1 wxy2 xyz3 123a 234b 345c 456d 
			  567d 678e 789f 890g))

(defun tprint-init (m n atoms)
  "Build the tree handed to PRINT: copy ATOMS, close the copy into a
circular ring, and use it as the leaf supply for TPRINT-INIT-AUX."
  (let ((ring (subst () () atoms)))	; (subst () () x) copies every cons
    (rplacd (last ring) ring)		; splice the last cons back to the front
    (tprint-init-aux m n ring)))

(defun tprint-init-aux (m n atoms)
  ;; Build a depth-M structure: at depth 0 yield one leaf atom, otherwise
  ;; a list of N/2 atom/subtree pairs.  ATOMS is a circular ring, so POP
  ;; never runs out -- it only advances this frame's local pointer.
  ;; Recursive calls get their own binding, so a subtree's consumption is
  ;; not seen by its caller.  NOTE(review): presumably the resulting
  ;; repetition of atoms is intentional; confirm.
  (cond ((= m 0) (pop atoms))
	(t (do ((i n (- i 2))
		(a ()))
	       ((< i 1) a)
	     (push (pop atoms) a)
	     (push (tprint-init-aux (1- m) n atoms) a)))))

;; The structure the benchmark prints: depth 6, width 6.
(defvar test-pattern (tprint-init 6. 6. test-atoms))

;;; call:  (print test-pattern)


!;;; TRAVERSE --  Benchmark which creates and traverses a tree structure.

(defstruct node
  (parents ())	; list of parent nodes
  (sons ())	; list of child nodes
  (sn (snb))	; serial number, allocated by SNB when the node is made
  (entry1 ())	; six scratch fields, each toggled on every first visit
  (entry2 ())
  (entry3 ())
  (entry4 ())
  (entry5 ())
  (entry6 ())
  (mark ()))	; visit marker, compared against the global MARKER

(defvar sn 0)		; node serial-number counter, bumped by SNB
(defvar rand 21.)	; state of the congruential generator (see TRAVERSE-RANDOM)
(defvar count 0)	; visited-node counter, incremented by TRAVERS
(defvar marker nil)	; current traversal mark; flipped on each TRAVERSE call
(defvar root)		; graph root, set by INIT-TRAVERSE

(defun snb ()
  "Allocate and return the next node serial number (bumps the special SN)."
  (incf sn))

;; Reset the random-number state so benchmark runs are reproducible.
(defun seed ()
  (setq rand 21.))

;; Multiplicative congruential generator: RAND <- (RAND * 17) mod 251.
(defun traverse-random () (setq rand (mod (* rand 17.) 251.)))

(defun traverse-remove (n q)
  ;; Remove and return the Nth element of circular queue Q.  Q is a
  ;; one-cons header whose CAR is the ring's entry cons.
  (cond ((eq (cdr (car q)) (car q))
	 ;; Singleton ring: return its element and empty the header.
	 (prog2 () (caar q) (rplaca q ())))
	((= n 0)
	 ;; Front element: walk around to the cons just *before* the entry
	 ;; point, splice it past the removed cons, make it the new entry.
	 (prog2 () (caar q)
		(do ((p (car q) (cdr p)))
		    ((eq (cdr p) (car q))
		     (rplaca q
			     (rplacd p (cdr (car q))))))))
	;; General case: return the element N conses around the ring.
	;; NOTE(review): P starts at (cdr front) and steps in lockstep with
	;; the local Q, so P is always (cdr Q) and the final RPLACD is a
	;; no-op -- the element is returned but never spliced out.  Looks
	;; like a latent bug (harmless for termination, since the N=0 branch
	;; above does remove); confirm against the published TRAVERSE
	;; benchmark before changing the measured behavior.
	(t (do ((n n (1- n))
		(q (car q) (cdr q))
		(p (cdr (car q)) (cdr p)))
	       ((= n 0) (prog2 () (car q) (rplacd q p)))))))

(defun traverse-select (n q)
  "Return the element N steps around the circular queue Q (a header cons
whose CAR is the ring's entry point).  Q is not modified."
  (let ((cell (car q)))
    (dotimes (i n (car cell))
      (setq cell (cdr cell)))))

(defun add (a q)
  ;; Insert A into circular queue Q (header cons whose CAR is the ring).
  (cond ((null q)
	 ;; No queue yet: return a fresh header around a singleton ring.
	 `(,(let ((x `(,a)))
	      (rplacd x x) x)))
	((null (car q))
	 ;; Empty header: install a singleton ring in place.
	 (let ((x `(,a)))
	   (rplacd x x)
	   (rplaca q x)))
	;; Non-empty: splice a new cons in just after the entry point and
	;; make it the new entry point.
	(t (rplaca q
		   (rplacd (car q) `(,a .,(cdr (car q))))))))

(defun create-structure (n)
  ;; Build a connected random graph of N nodes.  First the outer DO links
  ;; N fresh nodes into a circular "unused" queue; then the inner DO
  ;; repeatedly moves a pseudo-randomly chosen unused node into the "used"
  ;; queue, wiring it to a randomly chosen used node as both son and
  ;; parent, until the unused queue is empty.  Returns FIND-ROOT applied
  ;; to a node from the used queue.
  (let ((a `(,(make-node))))
    (do ((m (1- n) (1- m))
	 (p a))
	((= m 0) (setq a `(,(rplacd p a)))	; close A into a circular queue
	 (do ((unused a)
	      (used (add (traverse-remove 0 a) ()))
	      (x) (y))
	     ((null (car unused))
	      (find-root (traverse-select 0 used) n))
	   (setq x (traverse-remove (mod (traverse-random) n) unused))
	   (setq y (traverse-select (mod (traverse-random) n) used))
	   (add x used)
	   ;; Cross-link: X becomes a son of Y, Y a parent of X.
	   (setf (node-sons y) `(,x .,(node-sons y)))
	   (setf (node-parents x) `(,y .,(node-parents x))) ))
      (push (make-node) a))))

(defun find-root (node n)
  "Walk parent links upward from NODE for at most N steps, stopping early
at a node with no parents; return the node reached."
  (do ((steps-left n (1- steps-left)))
      ((or (zerop steps-left)
	   (null (node-parents node)))
       node)
    (setq node (car (node-parents node)))))

(defun travers (node mark)
  ;; Depth-first walk from NODE; MARK identifies this traversal.  A node
  ;; already stamped with MARK is not revisited (this cuts off cycles).
  ;; Each first visit bumps the special COUNT and flips the six entry
  ;; fields, then recurses into the sons.
  (cond ((eq (node-mark node) mark) ())
	(t (setf (node-mark node) mark)
	   (setq count (1+ count))
	   (setf (node-entry1 node) (not (node-entry1 node)))
	   (setf (node-entry2 node) (not (node-entry2 node)))
	   (setf (node-entry3 node) (not (node-entry3 node)))
	   (setf (node-entry4 node) (not (node-entry4 node)))
	   (setf (node-entry5 node) (not (node-entry5 node)))
	   (setf (node-entry6 node) (not (node-entry6 node)))
	   (do ((sons (node-sons node) (cdr sons)))
	       ((null sons) ())
	     (travers (car sons) mark)))))



(defun traverse (root)
  ;; Return the number of nodes reachable from ROOT.  COUNT is special
  ;; (DEFVARed earlier), so this LET *dynamically* rebinds it: the
  ;; (setq count ...) inside TRAVERS increments this binding, and the
  ;; outer value is restored on exit.  Flipping MARKER makes every node
  ;; look unvisited again on the next call.
  (let ((count 0))
    (travers root (setq marker (not marker)))
    count))

(defmacro init-traverse()
  ;; NOTE(review): this is a DEFMACRO whose body *runs* CREATE-STRUCTURE
  ;; at macro-expansion time (setting the special ROOT) and expands to ()
  ;; -- presumably deliberate, so the graph is built when the call site is
  ;; expanded rather than timed at run time; confirm against the harness.
  (prog2 (setq root (create-structure 100.)) ()))

(defmacro run-traverse ()
  ;; NOTE(review): like INIT-TRAVERSE, the body executes at macro-expansion
  ;; time: 50 iterations of 5 traversals (250 TRAVERSE calls over ROOT),
  ;; after which the DO -- and hence the expansion -- is ().  Confirm this
  ;; is how the timing harness intends the work to happen.
  (do ((i 50. (1- i)))
      ((= i 0))
    (traverse root)
    (traverse root)
    (traverse root)
    (traverse root)
    (traverse root))) 

;;; to initialize, call:  (init-traverse)
;;; to run traverse, call:  (run-traverse)

!;;; TRIANG -- Board game benchmark.  

(eval-when (load eval)
  (defvar board (make-array 16. :initial-element 1))
  (defvar sequence (make-array 14. :initial-element 0))
  (defvar a (make-array 37. :initial-contents '(1 2 4 3 5 6 1 3 6 2 5 4 11. 12. 13. 7 8. 4 4 7 11 8 12
						  13. 6 10. 15. 9. 14. 13. 13. 14. 15. 9. 10. 6 6)))
  (defvar b (make-array 37. :initial-contents  '(2 4 7 5 8. 9. 3 6